Compare commits

...

11 Commits

Author SHA1 Message Date
8be23a2bb2 Add address-based lower bound to get_confirmed_signatures_for_address2 loop (#11426) (#11432)
(cherry picked from commit 5530ee4c95)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-07 00:06:44 +00:00
4ff9a6910d Fix blockstore empty panic (#11423) (#11430)
* Add panicking test

* Add failing test: fresh transaction-status column shouldn't point at valid root 0

* Prevent transaction status match outside of primary-index bounds

* Initialize transaction-status and address-signature primer entries with Slot::MAX

* Revert "Add failing test: fresh transaction-status column shouldn't point at valid root 0"

This reverts commit cbad2a9fae.

* Revert "Initialize transaction-status and address-signature primer entries with Slot::MAX"

This reverts commit ffaeac0669.

(cherry picked from commit 1061b50665)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-06 23:39:01 +00:00
fd192e3641 Link fix (#11368) (#11425)
* fixes logo

* cleans up homepage on docs

* adds icon files and tightens margins

* cleans up sidenav, adds top nav items

* fixes a link

* removes icon files

Co-authored-by: Dan Albert <dan@solana.com>
(cherry picked from commit 14dcaaee6c)

Co-authored-by: Raj Gokal <rajivgokal@gmail.com>
2020-08-06 19:37:04 +00:00
a8de467ef8 Realloc not supported (#11418)
(cherry picked from commit bc4c5c5a97)

Co-authored-by: Jack May <jack@solana.com>
2020-08-06 16:24:14 +00:00
1a186beb5c Update lib.rs
(cherry picked from commit 5a63c9d535)
2020-08-06 07:58:05 -07:00
66a21ed65e Enable cross program support in mainnet-beta epoch 63
(cherry picked from commit c9b1d08218)
2020-08-06 07:58:05 -07:00
1a42a40492 RPC: Plug getConfirmedSignaturesForAddress2 into bigtable storage (bp #11395) (#11406)
* Plug getConfirmedSignaturesForAddress2 into bigtable storage

(cherry picked from commit 4222932e08)

* Upgrade help description

(cherry picked from commit 9abb7db5f8)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-06 07:14:46 +00:00
5841e4d665 Long-term ledger storage with BigTable (bp #11222) (#11392)
* ledger-storage-bigtable boilerplate

(cherry picked from commit 9d2293bb32)

* $ wget https://pki.goog/roots.pem -O pki-goog-roots.pem

(cherry picked from commit 1617a025ce)

* Add access_token module

(cherry picked from commit 59d266a111)

* Add root_ca_certificate

(cherry picked from commit faa016e4b7)

* Add build-proto

(cherry picked from commit c31e1f5bf0)

* UiTransactionEncoding is now copy

(cherry picked from commit 494968be66)

* Increase timeout

(cherry picked from commit 57dfebc5ba)

* Add build-proto/build.sh output

(cherry picked from commit 54dae6ba2c)

* Supress doctest errors

(cherry picked from commit 019c75797d)

* Add compression

(cherry picked from commit 243e05d59f)

* Add bigtable

(cherry picked from commit 6e0353965a)

* Add configuration info

(cherry picked from commit 98cca1e774)

* Add ledger-tool bigtable subcommands

(cherry picked from commit f9049d6ee4)

# Conflicts:
#	ledger-tool/Cargo.toml

* Make room for tokio 0.2

(cherry picked from commit b876fb84ba)

# Conflicts:
#	core/Cargo.toml

* Setup a tokio 0.2 runtime for RPC usage

(cherry picked from commit 0e02740565)

# Conflicts:
#	core/Cargo.toml

* Plumb Bigtable ledger storage into the RPC subsystem

(cherry picked from commit dfae9a9864)

# Conflicts:
#	core/Cargo.toml

* Add RPC transaction history design

(cherry picked from commit e56ea138c7)

* Simplify access token refreshing

(cherry picked from commit 1f7af14386)

* Report block status more frequently

(cherry picked from commit 22c46ebf96)

* after -> before

(cherry picked from commit 227ea934ff)

* Rebase

* Cargo.lock

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-06 04:06:44 +00:00
6542a04521 Mark token-specific rpcs as unstable (#11402)
(cherry picked from commit 7430896c79)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-08-06 04:00:18 +00:00
5c27009758 Force program address off the curve (#11323) (#11398)
(cherry picked from commit 03263c850a)

Co-authored-by: Jack May <jack@solana.com>
2020-08-06 01:00:42 +00:00
888f3522d8 Add getConfirmedSignaturesForAddress2 RPC method (bp #11259) (#11394)
* Add getConfirmedSignaturesForAddress2 RPC method

(cherry picked from commit 1b2276520b)

* Reimplement transaction-history command with getConfirmedSignaturesForAddress2

(cherry picked from commit 087fd32ce3)

* Rework get_confirmed_signatures_for_address2

(cherry picked from commit a11f137810)

* Rename startAfter to before

(cherry picked from commit 02c0981ecf)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-05 22:47:50 +00:00
62 changed files with 7225 additions and 254 deletions

Cargo.lock (generated, 791 lines): file diff suppressed because it is too large.

@ -26,6 +26,7 @@ members = [
"log-analyzer",
"merkle-tree",
"stake-o-matic",
"storage-bigtable",
"streamer",
"measure",
"metrics",


@ -7,7 +7,7 @@ source multinode-demo/common.sh
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
timeout 15 ./run.sh &
timeout 120 ./run.sh &
pid=$!
attempts=20


@ -231,8 +231,8 @@ pub enum CliCommand {
TotalSupply,
TransactionHistory {
address: Pubkey,
end_slot: Option<Slot>, // None == latest slot
slot_limit: Option<u64>, // None == search full history
before: Option<Signature>,
limit: usize,
},
// Nonce commands
AuthorizeNonceAccount {
@ -1871,9 +1871,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::TotalSupply => process_total_supply(&rpc_client, config),
CliCommand::TransactionHistory {
address,
end_slot,
slot_limit,
} => process_transaction_history(&rpc_client, address, *end_slot, *slot_limit),
before,
limit,
} => process_transaction_history(&rpc_client, config, address, *before, *limit),
// Nonce Commands


@ -13,7 +13,6 @@ use solana_client::{
pubsub_client::{PubsubClient, SlotInfoMessage},
rpc_client::RpcClient,
rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter},
rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
@ -24,6 +23,7 @@ use solana_sdk::{
message::Message,
native_token::lamports_to_sol,
pubkey::{self, Pubkey},
signature::Signature,
system_instruction, system_program,
sysvar::{
self,
@ -257,9 +257,8 @@ impl ClusterQuerySubCommands for App<'_, '_> {
)
.subcommand(
SubCommand::with_name("transaction-history")
.about("Show historical transactions affecting the given address, \
ordered based on the slot in which they were confirmed in \
from lowest to highest slot")
.about("Show historical transactions affecting the given address \
from newest to oldest")
.arg(
pubkey!(Arg::with_name("address")
.index(1)
@ -267,26 +266,22 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.required(true),
"Account address"),
)
.arg(
Arg::with_name("end_slot")
.takes_value(false)
.value_name("SLOT")
.index(2)
.validator(is_slot)
.help(
"Slot to start from [default: latest slot at maximum commitment]"
),
)
.arg(
Arg::with_name("limit")
.long("limit")
.takes_value(true)
.value_name("NUMBER OF SLOTS")
.value_name("LIMIT")
.validator(is_slot)
.help(
"Limit the search to this many slots"
),
),
.default_value("1000")
.help("Maximum number of transaction signatures to return"),
)
.arg(
Arg::with_name("before")
.long("before")
.value_name("TRANSACTION_SIGNATURE")
.takes_value(true)
.help("Start with the first signature older than this one"),
)
)
}
}
@ -440,14 +435,22 @@ pub fn parse_transaction_history(
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
let end_slot = value_t!(matches, "end_slot", Slot).ok();
let slot_limit = value_t!(matches, "limit", u64).ok();
let before = match matches.value_of("before") {
Some(signature) => Some(
signature
.parse()
.map_err(|err| CliError::BadParameter(format!("Invalid signature: {}", err)))?,
),
None => None,
};
let limit = value_t_or_exit!(matches, "limit", usize);
Ok(CliCommandInfo {
command: CliCommand::TransactionHistory {
address,
end_slot,
slot_limit,
before,
limit,
},
signers: vec![],
})
@ -1305,41 +1308,36 @@ pub fn process_show_validators(
pub fn process_transaction_history(
rpc_client: &RpcClient,
config: &CliConfig,
address: &Pubkey,
end_slot: Option<Slot>, // None == use latest slot
slot_limit: Option<u64>,
before: Option<Signature>,
limit: usize,
) -> ProcessResult {
let end_slot = {
if let Some(end_slot) = end_slot {
end_slot
let results = rpc_client.get_confirmed_signatures_for_address2_with_config(
address,
before,
Some(limit),
)?;
let transactions_found = format!("{} transactions found", results.len());
for result in results {
if config.verbose {
println!(
"{} [slot={} status={}] {}",
result.signature,
result.slot,
match result.err {
None => "Confirmed".to_string(),
Some(err) => format!("Failed: {:?}", err),
},
result.memo.unwrap_or_else(|| "".to_string()),
);
} else {
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
println!("{}", result.signature);
}
};
let mut start_slot = match slot_limit {
Some(slot_limit) => end_slot.saturating_sub(slot_limit),
None => rpc_client.minimum_ledger_slot()?,
};
println!(
"Transactions affecting {} within slots [{},{}]",
address, start_slot, end_slot
);
let mut transaction_count = 0;
while start_slot < end_slot {
let signatures = rpc_client.get_confirmed_signatures_for_address(
address,
start_slot,
(start_slot + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE).min(end_slot),
)?;
for signature in &signatures {
println!("{}", signature);
}
transaction_count += signatures.len();
start_slot += MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE;
}
Ok(format!("{} transactions found", transaction_count))
Ok(transactions_found)
}
#[cfg(test)]


@ -2,7 +2,10 @@ use crate::{
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
http_sender::HttpSender,
mock_sender::{MockSender, Mocks},
rpc_config::{RpcLargestAccountsConfig, RpcSendTransactionConfig, RpcTokenAccountsFilter},
rpc_config::{
RpcGetConfirmedSignaturesForAddress2Config, RpcLargestAccountsConfig,
RpcSendTransactionConfig, RpcTokenAccountsFilter,
},
rpc_request::{RpcError, RpcRequest, TokenAccountsFilter},
rpc_response::*,
rpc_sender::RpcSender,
@ -289,6 +292,32 @@ impl RpcClient {
Ok(signatures)
}
pub fn get_confirmed_signatures_for_address2(
&self,
address: &Pubkey,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
self.get_confirmed_signatures_for_address2_with_config(address, None, None)
}
pub fn get_confirmed_signatures_for_address2_with_config(
&self,
address: &Pubkey,
before: Option<Signature>,
limit: Option<usize>,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
let config = RpcGetConfirmedSignaturesForAddress2Config {
before: before.map(|signature| signature.to_string()),
limit,
};
let result: Vec<RpcConfirmedTransactionStatusWithSignature> = self.send(
RpcRequest::GetConfirmedSignaturesForAddress2,
json!([address.to_string(), config]),
)?;
Ok(result)
}
pub fn get_confirmed_transaction(
&self,
signature: &Signature,


@ -65,3 +65,10 @@ pub enum RpcTokenAccountsFilter {
Mint(String),
ProgramId(String),
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcGetConfirmedSignaturesForAddress2Config {
pub before: Option<String>, // Signature as base-58 string
pub limit: Option<usize>,
}
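
For reference, a minimal sketch of how this config serializes (assuming `serde_json` is available); the camelCase rename yields exactly the JSON object that `getConfirmedSignaturesForAddress2` accepts as its second positional parameter, and the signature value below is purely illustrative:

```rust
use solana_client::rpc_config::RpcGetConfirmedSignaturesForAddress2Config;

fn main() {
    let config = RpcGetConfirmedSignaturesForAddress2Config {
        // Base-58 transaction signature to page backwards from (illustrative value)
        before: Some("5h6xBEauJ3PK6SWCZ1PGjBvj8vDdWG3KpwATGy1ARAXFSDwt8GFXM7W5Ncn16wmqokgpiKRLuS83KUxyZyv2sUYv".into()),
        limit: Some(100),
    };
    // Prints: {"before":"5h6x...","limit":100}
    println!("{}", serde_json::to_string(&config).unwrap());
}
```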


@ -14,6 +14,7 @@ pub enum RpcRequest {
GetConfirmedBlock,
GetConfirmedBlocks,
GetConfirmedSignaturesForAddress,
GetConfirmedSignaturesForAddress2,
GetConfirmedTransaction,
GetEpochInfo,
GetEpochSchedule,
@ -65,6 +66,7 @@ impl fmt::Display for RpcRequest {
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
RpcRequest::GetConfirmedSignaturesForAddress2 => "getConfirmedSignaturesForAddress2",
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
RpcRequest::GetEpochInfo => "getEpochInfo",
RpcRequest::GetEpochSchedule => "getEpochSchedule",
@ -112,6 +114,7 @@ pub const NUM_LARGEST_ACCOUNTS: usize = 20;
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
pub const MAX_GET_CONFIRMED_BLOCKS_RANGE: u64 = 500_000;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT: usize = 1_000;
// Validators that are this number of slots behind are considered delinquent
pub const DELINQUENT_VALIDATOR_SLOT_DISTANCE: u64 = 128;


@ -6,6 +6,7 @@ use solana_sdk::{
inflation::Inflation,
transaction::{Result, TransactionError},
};
use solana_transaction_status::ConfirmedTransactionStatusWithSignature;
use std::{collections::HashMap, net::SocketAddr};
pub type RpcResult<T> = client_error::Result<Response<T>>;
@ -236,3 +237,29 @@ pub struct RpcTokenAccountBalance {
#[serde(flatten)]
pub amount: RpcTokenAmount,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcConfirmedTransactionStatusWithSignature {
pub signature: String,
pub slot: Slot,
pub err: Option<TransactionError>,
pub memo: Option<String>,
}
impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
let ConfirmedTransactionStatusWithSignature {
signature,
slot,
err,
memo,
} = value;
Self {
signature: signature.to_string(),
slot,
err,
memo,
}
}
}


@ -61,6 +61,7 @@ solana-runtime = { path = "../runtime", version = "1.3.0" }
solana-sdk = { path = "../sdk", version = "1.3.0" }
solana-sdk-macro-frozen-abi = { path = "../sdk/macro-frozen-abi", version = "1.3.0" }
solana-stake-program = { path = "../programs/stake", version = "1.3.0" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.3.0" }
solana-streamer = { path = "../streamer", version = "1.3.0" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.3.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.0" }
@ -70,11 +71,11 @@ solana-vote-signer = { path = "../vote-signer", version = "1.3.0" }
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
tempfile = "3.1.0"
thiserror = "1.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
tokio_01 = { version = "0.1", package = "tokio" }
tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
tokio_io_01 = { version = "0.1", package = "tokio-io" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.3.0" }
tokio = { version = "0.2.22", features = ["full"] }
trees = "0.2.1"
[dev-dependencies]


@ -17,6 +17,7 @@ use solana_client::{
rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType},
rpc_request::{
TokenAccountsFilter, DELINQUENT_VALIDATOR_SLOT_DISTANCE, MAX_GET_CONFIRMED_BLOCKS_RANGE,
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT,
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, NUM_LARGEST_ACCOUNTS,
},
@ -66,6 +67,7 @@ use std::{
Arc, Mutex, RwLock,
},
};
use tokio::runtime;
fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> {
let context = RpcResponseContext { slot: bank.slot() };
@ -90,6 +92,7 @@ pub struct JsonRpcConfig {
pub identity_pubkey: Pubkey,
pub faucet_addr: Option<SocketAddr>,
pub health_check_slot_distance: u64,
pub enable_bigtable_ledger_storage: bool,
}
#[derive(Clone)]
@ -103,6 +106,8 @@ pub struct JsonRpcRequestProcessor {
cluster_info: Arc<ClusterInfo>,
genesis_hash: Hash,
transaction_sender: Arc<Mutex<Sender<TransactionInfo>>>,
runtime_handle: runtime::Handle,
bigtable_ledger_storage: Option<solana_storage_bigtable::LedgerStorage>,
}
impl Metadata for JsonRpcRequestProcessor {}
@ -157,6 +162,7 @@ impl JsonRpcRequestProcessor {
})
}
#[allow(clippy::too_many_arguments)]
pub fn new(
config: JsonRpcConfig,
bank_forks: Arc<RwLock<BankForks>>,
@ -166,6 +172,8 @@ impl JsonRpcRequestProcessor {
health: Arc<RpcHealth>,
cluster_info: Arc<ClusterInfo>,
genesis_hash: Hash,
runtime: &runtime::Runtime,
bigtable_ledger_storage: Option<solana_storage_bigtable::LedgerStorage>,
) -> (Self, Receiver<TransactionInfo>) {
let (sender, receiver) = channel();
(
@ -179,6 +187,8 @@ impl JsonRpcRequestProcessor {
cluster_info,
genesis_hash,
transaction_sender: Arc::new(Mutex::new(sender)),
runtime_handle: runtime.handle().clone(),
bigtable_ledger_storage,
},
receiver,
)
@ -217,6 +227,8 @@ impl JsonRpcRequestProcessor {
cluster_info,
genesis_hash,
transaction_sender: Arc::new(Mutex::new(sender)),
runtime_handle: runtime::Runtime::new().unwrap().handle().clone(),
bigtable_ledger_storage: None,
}
}
@ -556,6 +568,7 @@ impl JsonRpcRequestProcessor {
slot: Slot,
encoding: Option<UiTransactionEncoding>,
) -> Result<Option<ConfirmedBlock>> {
let encoding = encoding.unwrap_or(UiTransactionEncoding::Json);
if self.config.enable_rpc_transaction_history
&& slot
<= self
@ -564,7 +577,15 @@ impl JsonRpcRequestProcessor {
.unwrap()
.highest_confirmed_root()
{
let result = self.blockstore.get_confirmed_block(slot, encoding);
let result = self.blockstore.get_confirmed_block(slot, Some(encoding));
if result.is_err() {
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
return Ok(self
.runtime_handle
.block_on(bigtable_ledger_storage.get_confirmed_block(slot, encoding))
.ok());
}
}
self.check_slot_cleaned_up(&result, slot)?;
Ok(result.ok())
} else {
@ -593,9 +614,25 @@ impl JsonRpcRequestProcessor {
MAX_GET_CONFIRMED_BLOCKS_RANGE
)));
}
let lowest_slot = self.blockstore.lowest_slot();
if start_slot < lowest_slot {
// If the starting slot is lower than what's available in blockstore assume the entire
// [start_slot..end_slot] can be fetched from BigTable.
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
return Ok(self
.runtime_handle
.block_on(
bigtable_ledger_storage
.get_confirmed_blocks(start_slot, (end_slot - start_slot) as usize),
)
.unwrap_or_else(|_| vec![]));
}
}
Ok(self
.blockstore
.rooted_slot_iterator(max(start_slot, self.blockstore.lowest_slot()))
.rooted_slot_iterator(max(start_slot, lowest_slot))
.map_err(|_| Error::internal_error())?
.filter(|&slot| slot <= end_slot)
.collect())
@ -689,6 +726,16 @@ impl JsonRpcRequestProcessor {
err,
}
})
.or_else(|| {
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
self.runtime_handle
.block_on(bigtable_ledger_storage.get_signature_status(&signature))
.map(Some)
.unwrap_or(None)
} else {
None
}
})
} else {
None
};
@ -731,21 +778,38 @@ impl JsonRpcRequestProcessor {
signature: Signature,
encoding: Option<UiTransactionEncoding>,
) -> Option<ConfirmedTransaction> {
let encoding = encoding.unwrap_or(UiTransactionEncoding::Json);
if self.config.enable_rpc_transaction_history {
self.blockstore
.get_confirmed_transaction(signature, encoding)
match self
.blockstore
.get_confirmed_transaction(signature, Some(encoding))
.unwrap_or(None)
.filter(|confirmed_transaction| {
confirmed_transaction.slot
{
Some(confirmed_transaction) => {
if confirmed_transaction.slot
<= self
.block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root()
})
} else {
None
{
return Some(confirmed_transaction);
}
}
None => {
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
return self
.runtime_handle
.block_on(
bigtable_ledger_storage
.get_confirmed_transaction(&signature, encoding),
)
.unwrap_or(None);
}
}
}
}
None
}
pub fn get_confirmed_signatures_for_address(
@ -755,6 +819,8 @@ impl JsonRpcRequestProcessor {
end_slot: Slot,
) -> Vec<Signature> {
if self.config.enable_rpc_transaction_history {
// TODO: Add bigtable_ledger_storage support as a part of
// https://github.com/solana-labs/solana/pull/10928
let end_slot = min(
end_slot,
self.block_commitment_cache
@ -770,10 +836,74 @@ impl JsonRpcRequestProcessor {
}
}
pub fn get_confirmed_signatures_for_address2(
&self,
address: Pubkey,
mut before: Option<Signature>,
mut limit: usize,
) -> Result<Vec<RpcConfirmedTransactionStatusWithSignature>> {
if self.config.enable_rpc_transaction_history {
let highest_confirmed_root = self
.block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root();
let mut results = self
.blockstore
.get_confirmed_signatures_for_address2(
address,
highest_confirmed_root,
before,
limit,
)
.map_err(|err| Error::invalid_params(format!("{}", err)))?;
if results.len() < limit {
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
if !results.is_empty() {
limit -= results.len();
before = results.last().map(|x| x.signature);
}
let mut bigtable_results = self
.runtime_handle
.block_on(
bigtable_ledger_storage.get_confirmed_signatures_for_address(
&address,
before.as_ref(),
limit,
),
)
.map_err(|err| Error::invalid_params(format!("{}", err)))?;
results.append(&mut bigtable_results)
}
}
Ok(results.into_iter().map(|x| x.into()).collect())
} else {
Ok(vec![])
}
}
pub fn get_first_available_block(&self) -> Slot {
self.blockstore
let slot = self
.blockstore
.get_first_available_block()
.unwrap_or_default()
.unwrap_or_default();
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
let bigtable_slot = self
.runtime_handle
.block_on(bigtable_ledger_storage.get_first_available_block())
.unwrap_or(None)
.unwrap_or(slot);
if bigtable_slot < slot {
return bigtable_slot;
}
}
slot
}
pub fn get_stake_activation(
@ -1406,6 +1536,14 @@ pub trait RpcSol {
end_slot: Slot,
) -> Result<Vec<String>>;
#[rpc(meta, name = "getConfirmedSignaturesForAddress2")]
fn get_confirmed_signatures_for_address2(
&self,
meta: Self::Metadata,
address: String,
config: Option<RpcGetConfirmedSignaturesForAddress2Config>,
) -> Result<Vec<RpcConfirmedTransactionStatusWithSignature>>;
#[rpc(meta, name = "getFirstAvailableBlock")]
fn get_first_available_block(&self, meta: Self::Metadata) -> Result<Slot>;
@ -2036,6 +2174,34 @@ impl RpcSol for RpcSolImpl {
.collect())
}
fn get_confirmed_signatures_for_address2(
&self,
meta: Self::Metadata,
address: String,
config: Option<RpcGetConfirmedSignaturesForAddress2Config>,
) -> Result<Vec<RpcConfirmedTransactionStatusWithSignature>> {
let address = verify_pubkey(address)?;
let config = config.unwrap_or_default();
let before = if let Some(before) = config.before {
Some(verify_signature(&before)?)
} else {
None
};
let limit = config
.limit
.unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT);
if limit == 0 || limit > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT {
return Err(Error::invalid_params(format!(
"Invalid limit; max {}",
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT
)));
}
meta.get_confirmed_signatures_for_address2(address, before, limit)
}
fn get_first_available_block(&self, meta: Self::Metadata) -> Result<Slot> {
debug!("get_first_available_block rpc request received");
Ok(meta.get_first_available_block())
@ -2344,6 +2510,8 @@ pub mod tests {
RpcHealth::stub(),
cluster_info.clone(),
Hash::default(),
&runtime::Runtime::new().unwrap(),
None,
);
SendTransactionService::new(tpu_address, &bank_forks, &exit, receiver);
@ -3486,6 +3654,8 @@ pub mod tests {
RpcHealth::stub(),
cluster_info,
Hash::default(),
&runtime::Runtime::new().unwrap(),
None,
);
SendTransactionService::new(tpu_address, &bank_forks, &exit, receiver);
@ -3525,6 +3695,8 @@ pub mod tests {
health.clone(),
cluster_info,
Hash::default(),
&runtime::Runtime::new().unwrap(),
None,
);
SendTransactionService::new(tpu_address, &bank_forks, &exit, receiver);
@ -3705,6 +3877,8 @@ pub mod tests {
RpcHealth::stub(),
cluster_info,
Hash::default(),
&runtime::Runtime::new().unwrap(),
None,
);
SendTransactionService::new(tpu_address, &bank_forks, &exit, receiver);
assert_eq!(request_processor.validator_exit(), false);
@ -3732,6 +3906,8 @@ pub mod tests {
RpcHealth::stub(),
cluster_info,
Hash::default(),
&runtime::Runtime::new().unwrap(),
None,
);
SendTransactionService::new(tpu_address, &bank_forks, &exit, receiver);
assert_eq!(request_processor.validator_exit(), true);
@ -3821,6 +3997,8 @@ pub mod tests {
RpcHealth::stub(),
cluster_info,
Hash::default(),
&runtime::Runtime::new().unwrap(),
None,
);
SendTransactionService::new(tpu_address, &bank_forks, &exit, receiver);
assert_eq!(


@ -23,7 +23,7 @@ use std::{
sync::{mpsc::channel, Arc, RwLock},
thread::{self, Builder, JoinHandle},
};
use tokio::prelude::Future;
use tokio::runtime;
pub struct JsonRpcService {
thread_hdl: JoinHandle<()>,
@ -32,6 +32,7 @@ pub struct JsonRpcService {
pub request_processor: JsonRpcRequestProcessor, // Used only by test_rpc_new()...
close_handle: Option<CloseHandle>,
runtime: runtime::Runtime,
}
struct RpcRequestMiddleware {
@ -97,6 +98,9 @@ impl RpcRequestMiddleware {
}
fn process_file_get(&self, path: &str) -> RequestMiddlewareAction {
// Stuck on tokio 0.1 until the jsonrpc-http-server crate upgrades to tokio 0.2
use tokio_01::prelude::*;
let stem = path.split_at(1).1; // Drop leading '/' from path
let filename = {
match path {
@ -115,10 +119,10 @@ impl RpcRequestMiddleware {
RequestMiddlewareAction::Respond {
should_validate_hosts: true,
response: Box::new(
tokio_fs::file::File::open(filename)
tokio_fs_01::file::File::open(filename)
.and_then(|file| {
let buf: Vec<u8> = Vec::new();
tokio_io::io::read_to_end(file, buf)
tokio_io_01::io::read_to_end(file, buf)
.and_then(|item| Ok(hyper::Response::new(item.1.into())))
.or_else(|_| Ok(RpcRequestMiddleware::internal_server_error()))
})
@ -249,6 +253,28 @@ impl JsonRpcService {
));
let tpu_address = cluster_info.my_contact_info().tpu;
let mut runtime = runtime::Builder::new()
.threaded_scheduler()
.thread_name("rpc-runtime")
.enable_all()
.build()
.expect("Runtime");
let bigtable_ledger_storage = if config.enable_bigtable_ledger_storage {
runtime
.block_on(solana_storage_bigtable::LedgerStorage::new(false))
.map(|x| {
info!("BigTable ledger storage initialized");
Some(x)
})
.unwrap_or_else(|err| {
error!("Failed to initialize BigTable ledger storage: {:?}", err);
None
})
} else {
None
};
let (request_processor, receiver) = JsonRpcRequestProcessor::new(
config,
bank_forks.clone(),
@ -258,6 +284,8 @@ impl JsonRpcService {
health.clone(),
cluster_info,
genesis_hash,
&runtime,
bigtable_ledger_storage,
);
let exit_send_transaction_service = Arc::new(AtomicBool::new(false));
@ -325,6 +353,7 @@ impl JsonRpcService {
.register_exit(Box::new(move || close_handle_.close()));
Self {
thread_hdl,
runtime,
#[cfg(test)]
request_processor: test_request_processor,
close_handle: Some(close_handle),
@ -338,6 +367,7 @@ impl JsonRpcService {
}
pub fn join(self) -> thread::Result<()> {
self.runtime.shutdown_background();
self.thread_hdl.join()
}
}


@ -38,7 +38,9 @@ use std::{
iter,
sync::{Arc, Mutex, RwLock},
};
use tokio::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor};
// Stuck on tokio 0.1 until the jsonrpc-pubsub crate upgrades to tokio 0.2
use tokio_01::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor};
const RECEIVE_DELAY_MILLIS: u64 = 100;
@ -950,7 +952,7 @@ pub(crate) mod tests {
system_transaction,
};
use std::{fmt::Debug, sync::mpsc::channel, time::Instant};
use tokio::{prelude::FutureExt, runtime::Runtime, timer::Delay};
use tokio_01::{prelude::FutureExt, runtime::Runtime, timer::Delay};
pub(crate) fn robust_poll_or_panic<T: Debug + Send + 'static>(
receiver: futures::sync::mpsc::Receiver<T>,


@ -26,7 +26,7 @@ use std::{
thread::sleep,
time::{Duration, Instant},
};
use tokio::runtime::Runtime;
use tokio_01::runtime::Runtime;
macro_rules! json_req {
($method: expr, $params: expr) => {{
@ -189,7 +189,7 @@ fn test_rpc_subscriptions() {
.and_then(move |client| {
for sig in signature_set {
let status_sender = status_sender.clone();
tokio::spawn(
tokio_01::spawn(
client
.signature_subscribe(sig.clone(), None)
.and_then(move |sig_stream| {
@ -203,7 +203,7 @@ fn test_rpc_subscriptions() {
}),
);
}
tokio::spawn(
tokio_01::spawn(
client
.slot_subscribe()
.and_then(move |slot_stream| {
@ -218,7 +218,7 @@ fn test_rpc_subscriptions() {
);
for pubkey in account_set {
let account_sender = account_sender.clone();
tokio::spawn(
tokio_01::spawn(
client
.account_subscribe(pubkey, None)
.and_then(move |account_stream| {


@ -30,6 +30,16 @@ module.exports = {
label: "Validate",
position: "left",
},
{
to: "integrations/exchange",
label: "Integrate",
position: "left",
},
{
to: "cluster/overview",
label: "Learn",
position: "left",
},
{
href: "https://discordapp.com/invite/pquxPsq",
label: "Chat",


@ -1,7 +1,11 @@
module.exports = {
docs: {
"Introduction": ["introduction"],
"Wallet Guide": [
"About": [
"introduction",
"terminology",
"history",
],
"Wallets": [
"wallet-guide",
{
type: "category",
@ -28,11 +32,11 @@ module.exports = {
},
"wallet-guide/support",
],
"Staking Guide": [
"Staking": [
"staking",
"staking/stake-accounts",
],
"Command Line Guide": [
"Command Line": [
"cli",
"cli/install-solana-cli-tools",
"cli/conventions",
@ -43,8 +47,7 @@ module.exports = {
"offline-signing",
"offline-signing/durable-nonce",
],
"Solana Clusters": ["clusters"],
"Develop Applications": [
"Developing": [
"apps",
"apps/rent",
"apps/hello-world",
@ -56,8 +59,8 @@ module.exports = {
"apps/javascript-api",
"apps/builtins",
],
"Integration Guides": ["integrations/exchange"],
"Run a Validator": [
"Integrating": ["integrations/exchange"],
"Validating": [
"running-validator",
"running-validator/validator-reqs",
"running-validator/validator-start",
@ -65,12 +68,13 @@ module.exports = {
"running-validator/validator-stake",
"running-validator/validator-monitor",
"running-validator/validator-info",
"running-validator/validator-troubleshoot",
],
"Tour de SOL": [
"tour-de-sol",
{
type: "category",
label: "Incenvitized Testnet",
items: [
"tour-de-sol",
{
type: "category",
label: "Registration",
items: [
"tour-de-sol/registration/how-to-register",
@ -91,10 +95,21 @@ module.exports = {
},
"tour-de-sol/useful-links",
"tour-de-sol/submitting-bugs",
],
},
"running-validator/validator-troubleshoot",
],
"Benchmark a Cluster": ["cluster/bench-tps", "cluster/performance-metrics"],
"Solana's Architecture": [
"cluster/overview",
"Clusters": [
"clusters",
"cluster/bench-tps",
"cluster/performance-metrics"
],
"Architecture": [
{
type: "category",
label: "Cluster",
items: [
"cluster/overview",
"cluster/synchronization",
"cluster/leader-rotation",
"cluster/fork-generation",
@ -102,19 +117,27 @@ module.exports = {
"cluster/turbine-block-propagation",
"cluster/vote-signing",
"cluster/stake-delegation-and-rewards",
],
"Anatomy of a Validator": [
"validator/anatomy",
"validator/tpu",
"validator/tvu",
"validator/blockstore",
"validator/gossip",
"validator/runtime",
],
Terminology: ["terminology"],
History: ["history"],
"Implemented Design Proposals": [
"implemented-proposals/implemented-proposals",
],
},
{
type: "category",
label: "Validator",
items: [
"validator/anatomy",
"validator/tpu",
"validator/tvu",
"validator/blockstore",
"validator/gossip",
"validator/runtime",
],
},
],
"Design Proposals": [
{
type: "category",
label: "Implemented",
items: [
"implemented-proposals/implemented-proposals",
{
type: "category",
label: "Economic Design",
@ -154,9 +177,13 @@ module.exports = {
"implemented-proposals/cross-program-invocation",
"implemented-proposals/program-derived-addresses",
"implemented-proposals/abi-management",
],
"Accepted Design Proposals": [
"proposals/accepted-design-proposals",
],
},
{
type: "category",
label: "Accepted",
items: [
"proposals/accepted-design-proposals",
"proposals/ledger-replication-to-implement",
"proposals/optimistic-confirmation-and-slashing",
"proposals/vote-signing-to-implement",
@ -173,6 +200,8 @@ module.exports = {
"proposals/optimistic_confirmation",
"proposals/embedding-move",
"proposals/rip-curl",
]
},
],
},
};


@ -24,6 +24,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
- [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock)
- [getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks)
- [getConfirmedSignaturesForAddress](jsonrpc-api.md#getconfirmedsignaturesforaddress)
- [getConfirmedSignaturesForAddress2](jsonrpc-api.md#getconfirmedsignaturesforaddress2)
- [getConfirmedTransaction](jsonrpc-api.md#getconfirmedtransaction)
- [getEpochInfo](jsonrpc-api.md#getepochinfo)
- [getEpochSchedule](jsonrpc-api.md#getepochschedule)
@ -45,10 +46,6 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
- [getSlotLeader](jsonrpc-api.md#getslotleader)
- [getStakeActivation](jsonrpc-api.md#getstakeactivation)
- [getSupply](jsonrpc-api.md#getsupply)
- [getTokenAccountBalance](jsonrpc-api.md#gettokenaccountbalance)
- [getTokenAccountsByDelegate](jsonrpc-api.md#gettokenaccountsbydelegate)
- [getTokenAccountsByOwner](jsonrpc-api.md#gettokenaccountsbyowner)
- [getTokenSupply](jsonrpc-api.md#gettokensupply)
- [getTransactionCount](jsonrpc-api.md#gettransactioncount)
- [getVersion](jsonrpc-api.md#getversion)
- [getVoteAccounts](jsonrpc-api.md#getvoteaccounts)
@ -68,6 +65,15 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
- [slotSubscribe](jsonrpc-api.md#slotsubscribe)
- [slotUnsubscribe](jsonrpc-api.md#slotunsubscribe)
## Unstable Methods
Unstable methods may see breaking changes in patch releases and may not be supported in perpetuity.
- [getTokenAccountBalance](jsonrpc-api.md#gettokenaccountbalance)
- [getTokenAccountsByDelegate](jsonrpc-api.md#gettokenaccountsbydelegate)
- [getTokenAccountsByOwner](jsonrpc-api.md#gettokenaccountsbyowner)
- [getTokenSupply](jsonrpc-api.md#gettokensupply)
## Request Formatting
To make a JSON-RPC request, send an HTTP POST request with a `Content-Type: application/json` header. The JSON request data should contain 4 fields:
@ -151,7 +157,7 @@ Returns all information associated with the account of provided Pubkey
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or "jsonParsed". If parameter not provided, the default encoding is binary.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
#### Results:
@ -299,7 +305,7 @@ Returns identity and transaction information about a confirmed block in the ledg
#### Parameters:
- `<u64>` - slot, as u64 integer
- `<string>` - (optional) encoding for each returned Transaction, either "json", "jsonParsed", or "binary". If parameter not provided, the default encoding is JSON.
- `<string>` - (optional) encoding for each returned Transaction, either "json", "jsonParsed", or "binary". If parameter not provided, the default encoding is JSON. **jsonParsed encoding is UNSTABLE**
Parsed-JSON encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If parsed-JSON is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
#### Results:
@ -389,6 +395,8 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m
### getConfirmedSignaturesForAddress
**DEPRECATED: Please use getConfirmedSignaturesForAddress2 instead**
Returns a list of all the confirmed signatures for transactions involving an
address, within a specified Slot range. Max range allowed is 10,000 Slots
@ -416,6 +424,37 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m
{"jsonrpc":"2.0","result":{["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4bJdGN8Tt2kLWZ3Fa1dpwPSEkXWWTSszPSf1rRVsCwNjxbbUdwTeiWtmi8soA26YmwnKD4aAxNp8ci1Gjpdv4gsr","4LQ14a7BYY27578Uj8LPCaVhSdJGLn9DJqnUJHpy95FMqdKf9acAhUhecPQNjNUy6VoNFUbvwYkPociFSf87cWbG"]},"id":1}
```
### getConfirmedSignaturesForAddress2
Returns confirmed signatures for transactions involving an
address backwards in time from the provided signature or most recent confirmed block
#### Parameters:
* `<string>` - account address as base-58 encoded string
* `<object>` - (optional) Configuration object containing the following fields:
* `before: <string>` - (optional) start searching backwards from this transaction signature.
If not provided, the search starts from the top of the highest max confirmed block.
* `limit: <number>` - (optional) maximum transaction signatures to return (between 1 and 1,000, default: 1,000).
#### Results:
The result field will be an array of transaction signature information, ordered
from newest to oldest transaction:
* `<object>`
* `signature: <string>` - transaction signature as base-58 encoded string
* `slot: <u64>` - The slot that contains the block with the transaction
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
* `memo: <string | null>` - Memo associated with the transaction, null if no memo is present
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedSignaturesForAddress2","params":["Vote111111111111111111111111111111111111111", {"limit": 1}]}' localhost:8899
// Result
{"jsonrpc":"2.0","result":[{"err":null,"memo":null,"signature":"5h6xBEauJ3PK6SWCZ1PGjBvj8vDdWG3KpwATGy1ARAXFSDwt8GFXM7W5Ncn16wmqokgpiKRLuS83KUxyZyv2sUYv","slot":114}],"id":1}
```
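
The same query can be made from Rust through the `RpcClient` helpers added in this change. A minimal sketch, assuming a node listening on localhost:8899 and mirroring the curl example above:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;
use std::str::FromStr;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = RpcClient::new("http://localhost:8899".to_string());
    let address = Pubkey::from_str("Vote111111111111111111111111111111111111111")?;

    // Newest-first signature info; `before: None` starts from the most recent
    // confirmed block, `limit: Some(1)` matches the {"limit": 1} curl example.
    let results = client.get_confirmed_signatures_for_address2_with_config(
        &address,
        None,    // before: Option<Signature>
        Some(1), // limit: Option<usize>
    )?;
    for result in results {
        println!("{} (slot {})", result.signature, result.slot);
    }
    Ok(())
}
```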
### getConfirmedTransaction
Returns transaction details for a confirmed transaction
@ -424,7 +463,7 @@ Returns transaction details for a confirmed transaction
- `<string>` - transaction signature as base-58 encoded string
- `<string>` - (optional) encoding for the returned Transaction, either "json", "jsonParsed", or "binary".
- `<string>` - (optional) encoding for the returned Transaction, either "json", "jsonParsed", or "binary". **jsonParsed encoding is UNSTABLE**
Parsed-JSON encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If parsed-JSON is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
#### Results:
@ -805,7 +844,7 @@ Returns all accounts owned by the provided program Pubkey
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or "jsonParsed". If parameter not provided, the default encoding is binary.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
- (optional) `filters: <array>` - filter results using various [filter objects](jsonrpc-api.md#filters); account must meet all filter criteria to be included in results
##### Filters:
@ -1022,7 +1061,7 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
### getTokenAccountBalance
Returns the token balance of an SPL Token account.
Returns the token balance of an SPL Token account. **UNSTABLE**
#### Parameters:
@ -1048,7 +1087,7 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
### getTokenAccountsByDelegate
Returns all SPL Token accounts by approved Delegate.
Returns all SPL Token accounts by approved Delegate. **UNSTABLE**
#### Parameters:
@ -1059,7 +1098,7 @@ Returns all SPL Token accounts by approved Delegate.
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or "jsonParsed". If parameter not provided, the default encoding is binary.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
#### Results:
@ -1084,7 +1123,7 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
### getTokenAccountsByOwner
Returns all SPL Token accounts by token owner.
Returns all SPL Token accounts by token owner. **UNSTABLE**
#### Parameters:
@ -1095,7 +1134,7 @@ Returns all SPL Token accounts by token owner.
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or "jsonParsed". If parameter not provided, the default encoding is binary.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
#### Results:
@ -1120,7 +1159,7 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
### getTokenSupply
Returns the total supply of an SPL Token type.
Returns the total supply of an SPL Token type. **UNSTABLE**
#### Parameters:
@ -1387,7 +1426,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
- `<object>` - (optional) Configuration object containing the following optional fields:
- `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or "jsonParsed". If parameter not provided, the default encoding is binary.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
#### Results:
@ -1495,7 +1534,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or "jsonParsed". If parameter not provided, the default encoding is binary.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
- (optional) `filters: <array>` - filter results using various [filter objects](jsonrpc-api.md#filters); account must meet all filter criteria to be included in results
#### Results:


@ -1,5 +1,5 @@
---
title: History of the Solana Codebase
title: History
---
In November of 2017, Anatoly Yakovenko published a whitepaper describing Proof


@ -0,0 +1,106 @@
# Long term RPC Transaction History
There's a need for RPC to serve at least 6 months of transaction history. The
current history, on the order of days, is insufficient for downstream users.
6 months of transaction data cannot practically be stored in a validator's
rocksdb ledger, so an external data store is necessary. The validator's
rocksdb ledger will continue to serve as the primary data source, with queries
falling back to the external data store when data is no longer available locally.
The affected RPC endpoints are:
* [getFirstAvailableBlock](https://docs.solana.com/apps/jsonrpc-api#getfirstavailableblock)
* [getConfirmedBlock](https://docs.solana.com/apps/jsonrpc-api#getconfirmedblock)
* [getConfirmedBlocks](https://docs.solana.com/apps/jsonrpc-api#getconfirmedblocks)
* [getConfirmedSignaturesForAddress](https://docs.solana.com/apps/jsonrpc-api#getconfirmedsignaturesforaddress)
* [getConfirmedTransaction](https://docs.solana.com/apps/jsonrpc-api#getconfirmedtransaction)
* [getSignatureStatuses](https://docs.solana.com/apps/jsonrpc-api#getsignaturestatuses)
Note that [getBlockTime](https://docs.solana.com/apps/jsonrpc-api#getblocktime)
is not supported; once https://github.com/solana-labs/solana/issues/10089 is
fixed, `getBlockTime` can be removed.
Some system design constraints:
* The volume of data to store and search can quickly jump into the terabytes,
and is immutable.
* The system should be as light as possible for SREs. For example, an SQL
database cluster that requires an SRE to continually monitor and rebalance
nodes is undesirable.
* Data must be searchable in real time - batched queries that take minutes or
hours to run are unacceptable.
* Easy to replicate the data worldwide to co-locate it with the RPC endpoints
that will utilize it.
* Interfacing with the external data store should be easy and not require
depending on risky, lightly-used, community-supported code libraries.
Based on these constraints, Google's BigTable product is selected as the data
store.
## Table Schema
A BigTable instance is used to hold all transaction data, broken up into
different tables for quick searching.
New data may be copied into the instance at any time without affecting the existing
data, and all data is immutable. Generally the expectation is that new data
will be uploaded once the current epoch completes, but there is no limitation on
the frequency of data dumps.
Cleanup of old data is automatic by configuring the data retention policy of the
instance tables appropriately; expired data simply disappears. Therefore the order
in which data is added becomes important. For example, if data from epoch N-1 is
added after data from epoch N, the older epoch data will outlive the newer data.
However, beyond producing _holes_ in query results, this kind of unordered deletion
will have no ill effect. Note that this method of cleanup effectively allows for an
unlimited amount of transaction data to be stored, restricted only by the
monetary costs of doing so.
The table layout supports the existing RPC endpoints only. New RPC endpoints
in the future may require additions to the schema and potentially iterating over
all transactions to build up the necessary metadata.
## Accessing BigTable
BigTable has a gRPC endpoint that can be accessed using the
[tonic](https://crates.io/crates/tonic) crate and the raw protobuf API, as currently no
higher-level Rust crate for BigTable exists. Practically this makes parsing the
results of BigTable queries more complicated but is not a significant issue.
## Data Population
The ongoing population of instance data will occur on an epoch cadence through the
use of a new `solana-ledger-tool` command that will convert rocksdb data for a
given slot range into the instance schema.
The same process will be run once, manually, to backfill the existing ledger
data.
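
A hypothetical invocation of that command is sketched below; the subcommand and slot-range arguments mirror the `upload` entrypoint in the new `ledger-tool/src/bigtable.rs`, but the exact argument names are assumptions not shown in this diff:

```bash
# Hypothetical: convert rocksdb data for a slot range into the instance schema.
solana-ledger-tool --ledger /path/to/ledger bigtable upload <starting-slot> [<ending-slot>]
```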
### Block Table: `block`
This table contains the compressed block data for a given slot.
The row key is generated by taking the 16 digit lower case hexadecimal
representation of the slot, to ensure that the oldest slot with a confirmed
block will always be first when the rows are listed. For example, the row key for
slot 42 would be 000000000000002a.
The row data is a compressed `StoredConfirmedBlock` struct.
### Account Address Transaction Signature Lookup Table: `tx-by-addr`
This table contains the transactions that affect a given address.
The row key is `<base58
address>/<slot-id-one's-complement-hex-slot-0-prefixed-to-16-digits>`. The row
data is a compressed `TransactionByAddrInfo` struct.
Taking the one's complement of the slot ensures that the newest slot with
transactions that affect an address will always be listed first.
Sysvar addresses are not indexed. However, frequently used programs such as
Vote or System are, and will likely have a row for every confirmed slot.
### Transaction Signature Lookup Table: `tx`
This table maps a transaction signature to its confirmed block, and index within that block.
The row key is the base58-encoded transaction signature.
The row data is a compressed `TransactionInfo` struct.
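
A small sketch of the row-key derivations described above (these helper names are illustrative, not the ones used in the implementation):

```rust
use solana_sdk::{clock::Slot, pubkey::Pubkey};

/// `block` table key: 16-digit lower-case hex slot, so rows list oldest-first.
fn block_row_key(slot: Slot) -> String {
    format!("{:016x}", slot) // slot 42 => "000000000000002a"
}

/// `tx-by-addr` table key: base58 address, then the one's complement of the
/// slot as 0-prefixed 16-digit hex, so rows list newest-first per address.
fn tx_by_addr_row_key(address: &Pubkey, slot: Slot) -> String {
    format!("{}/{:016x}", address, !slot)
}

/// `tx` table key: simply the base58-encoded transaction signature.
fn tx_row_key(signature: &str) -> String {
    signature.to_string()
}
```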


@ -13,13 +13,13 @@ const features = [
description: <>Get started building your decentralized app or marketplace.</>,
},
{
title: <> Run a Validator Node</>,
title: <>🎛 Run a Validator Node</>,
imageUrl: "running-validator",
description: <>Validate transactions, secure the network, and earn rewards.</>,
},
{
title: <>🏛 Create an SPL Token</>,
imageUrl: "tour-de-sol",
imageUrl: "https://spl.solana.com/token",
description: (
<>
Launch your own SPL Token, Solana's equivalent of ERC-20.


@ -105,7 +105,11 @@ pub fn get_entered_epoch_callback(operating_mode: OperatingMode) -> EnteredEpoch
bank.add_native_program(name, program_id);
}
}
bank.set_cross_program_support(OperatingMode::Stable != operating_mode);
if OperatingMode::Stable == operating_mode {
bank.set_cross_program_support(bank.epoch() >= 63);
} else {
bank.set_cross_program_support(true);
}
})
}


@ -12,22 +12,27 @@ homepage = "https://solana.com/"
bs58 = "0.3.1"
bytecount = "0.6.0"
clap = "2.33.1"
futures = "0.3.5"
futures-util = "0.3.5"
histogram = "*"
log = { version = "0.4.8" }
regex = "1"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.3.0" }
solana-cli = { path = "../cli", version = "1.3.0" }
solana-ledger = { path = "../ledger", version = "1.3.0" }
solana-logger = { path = "../logger", version = "1.3.0" }
solana-measure = { path = "../measure", version = "1.3.0" }
solana-runtime = { path = "../runtime", version = "1.3.0" }
solana-sdk = { path = "../sdk", version = "1.3.0" }
solana-stake-program = { path = "../programs/stake", version = "1.3.0" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.3.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.0" }
solana-version = { path = "../version", version = "1.3.0" }
solana-vote-program = { path = "../programs/vote", version = "1.3.0" }
tempfile = "3.1.0"
regex = "1"
tokio = { version = "0.2.22", features = ["full"] }
[dev-dependencies]
assert_cmd = "1.0"

ledger-tool/src/bigtable.rs (new file, 551 lines):

@ -0,0 +1,551 @@
/// The `bigtable` subcommand
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
use log::*;
use solana_clap_utils::{
input_parsers::pubkey_of,
input_validators::{is_slot, is_valid_pubkey},
};
use solana_cli::display::println_transaction;
use solana_ledger::{blockstore::Blockstore, blockstore_db::AccessType};
use solana_measure::measure::Measure;
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
use solana_transaction_status::UiTransactionEncoding;
use std::{collections::HashSet, path::Path, process::exit, result::Result, time::Duration};
use tokio::time::delay_for;
// Attempt to upload this many blocks in parallel
const NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL: usize = 32;
// Read up to this many blocks from blockstore before blocking on the upload process
const BLOCK_READ_AHEAD_DEPTH: usize = NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL * 2;
async fn upload(
blockstore: Blockstore,
starting_slot: Slot,
ending_slot: Option<Slot>,
allow_missing_metadata: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let mut measure = Measure::start("entire upload");
let bigtable = solana_storage_bigtable::LedgerStorage::new(false)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
info!("Loading ledger slots...");
let blockstore_slots: Vec<_> = blockstore
.slot_meta_iterator(starting_slot)
.map_err(|err| {
format!(
"Failed to load entries starting from slot {}: {:?}",
starting_slot, err
)
})?
.filter_map(|(slot, _slot_meta)| {
if let Some(ending_slot) = &ending_slot {
if slot > *ending_slot {
return None;
}
}
Some(slot)
})
.collect();
if blockstore_slots.is_empty() {
info!("Ledger has no slots in the specified range");
return Ok(());
}
info!(
"Found {} slots in the range ({}, {})",
blockstore_slots.len(),
blockstore_slots.first().unwrap(),
blockstore_slots.last().unwrap()
);
let mut blockstore_slots_with_no_confirmed_block = HashSet::new();
// Gather the blocks that are already present in bigtable, by slot
let bigtable_slots = {
let mut bigtable_slots = vec![];
let first_blockstore_slot = *blockstore_slots.first().unwrap();
let last_blockstore_slot = *blockstore_slots.last().unwrap();
info!(
"Loading list of bigtable blocks between slots {} and {}...",
first_blockstore_slot, last_blockstore_slot
);
let mut start_slot = *blockstore_slots.first().unwrap();
while start_slot <= last_blockstore_slot {
let mut next_bigtable_slots = loop {
match bigtable.get_confirmed_blocks(start_slot, 1000).await {
Ok(slots) => break slots,
Err(err) => {
error!("get_confirmed_blocks for {} failed: {:?}", start_slot, err);
// Consider exponential backoff...
delay_for(Duration::from_secs(2)).await;
}
}
};
if next_bigtable_slots.is_empty() {
break;
}
bigtable_slots.append(&mut next_bigtable_slots);
start_slot = bigtable_slots.last().unwrap() + 1;
}
bigtable_slots
.into_iter()
.filter(|slot| *slot <= last_blockstore_slot)
.collect::<Vec<_>>()
};
// The blocks that still need to be uploaded is the difference between what's already in the
// bigtable and what's in blockstore...
let blocks_to_upload = {
let blockstore_slots = blockstore_slots.iter().cloned().collect::<HashSet<_>>();
let bigtable_slots = bigtable_slots.into_iter().collect::<HashSet<_>>();
let mut blocks_to_upload = blockstore_slots
.difference(&blockstore_slots_with_no_confirmed_block)
.cloned()
.collect::<HashSet<_>>()
.difference(&bigtable_slots)
.cloned()
.collect::<Vec<_>>();
blocks_to_upload.sort();
blocks_to_upload
};
if blocks_to_upload.is_empty() {
info!("No blocks need to be uploaded to bigtable");
return Ok(());
}
info!(
"{} blocks to be uploaded to the bucket in the range ({}, {})",
blocks_to_upload.len(),
blocks_to_upload.first().unwrap(),
blocks_to_upload.last().unwrap()
);
// Load the blocks out of blockstore in a separate thread to allow for concurrent block uploading
let (_loader_thread, receiver) = {
let (sender, receiver) = std::sync::mpsc::sync_channel(BLOCK_READ_AHEAD_DEPTH);
(
std::thread::spawn(move || {
let mut measure = Measure::start("block loader thread");
for (i, slot) in blocks_to_upload.iter().enumerate() {
let _ = match blockstore.get_confirmed_block(
*slot,
Some(solana_transaction_status::UiTransactionEncoding::Binary),
) {
Ok(confirmed_block) => sender.send((*slot, Some(confirmed_block))),
Err(err) => {
warn!(
"Failed to get load confirmed block from slot {}: {:?}",
slot, err
);
sender.send((*slot, None))
}
};
if i % NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL == 0 {
info!(
"{}% of blocks processed ({}/{})",
i * 100 / blocks_to_upload.len(),
i,
blocks_to_upload.len()
);
}
}
measure.stop();
info!("{} to load {} blocks", measure, blocks_to_upload.len());
}),
receiver,
)
};
let mut failures = 0;
use futures::stream::StreamExt;
let mut stream =
tokio::stream::iter(receiver.into_iter()).chunks(NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL);
while let Some(blocks) = stream.next().await {
let mut measure_upload = Measure::start("Upload");
let mut num_blocks = blocks.len();
info!("Preparing the next {} blocks for upload", num_blocks);
let uploads = blocks.into_iter().filter_map(|(slot, block)| match block {
None => {
blockstore_slots_with_no_confirmed_block.insert(slot);
num_blocks -= 1;
None
}
Some(confirmed_block) => {
if confirmed_block
.transactions
.iter()
.any(|transaction| transaction.meta.is_none())
{
if allow_missing_metadata {
info!("Transaction metadata missing from slot {}", slot);
} else {
panic!("Transaction metadata missing from slot {}", slot);
}
}
Some(bigtable.upload_confirmed_block(slot, confirmed_block))
}
});
for result in futures::future::join_all(uploads).await {
if result.is_err() {
error!("upload_confirmed_block() failed: {:?}", result.err());
failures += 1;
}
}
measure_upload.stop();
info!("{} for {} blocks", measure_upload, num_blocks);
}
measure.stop();
info!("{}", measure);
if failures > 0 {
Err(format!("Incomplete upload, {} operations failed", failures).into())
} else {
Ok(())
}
}
async fn first_available_block() -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(true).await?;
match bigtable.get_first_available_block().await? {
Some(block) => println!("{}", block),
None => println!("No blocks available"),
}
Ok(())
}
async fn block(slot: Slot) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(false)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
let block = bigtable
.get_confirmed_block(slot, UiTransactionEncoding::Binary)
.await?;
println!("Slot: {}", slot);
println!("Parent Slot: {}", block.parent_slot);
println!("Blockhash: {}", block.blockhash);
println!("Previous Blockhash: {}", block.previous_blockhash);
if block.block_time.is_some() {
println!("Block Time: {:?}", block.block_time);
}
if !block.rewards.is_empty() {
println!("Rewards: {:?}", block.rewards);
}
for (index, transaction_with_meta) in block.transactions.iter().enumerate() {
println!("Transaction {}:", index);
println_transaction(
&transaction_with_meta.transaction.decode().unwrap(),
&transaction_with_meta.meta,
" ",
);
}
Ok(())
}
async fn blocks(starting_slot: Slot, limit: usize) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(false)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
let slots = bigtable.get_confirmed_blocks(starting_slot, limit).await?;
println!("{:?}", slots);
println!("{} blocks found", slots.len());
Ok(())
}
async fn confirm(signature: &Signature, verbose: bool) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(false)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
let transaction_status = bigtable.get_signature_status(signature).await?;
if verbose {
match bigtable
.get_confirmed_transaction(signature, UiTransactionEncoding::Binary)
.await
{
Ok(Some(confirmed_transaction)) => {
println!(
"\nTransaction executed in slot {}:",
confirmed_transaction.slot
);
println_transaction(
&confirmed_transaction
.transaction
.transaction
.decode()
.expect("Successful decode"),
&confirmed_transaction.transaction.meta,
" ",
);
}
Ok(None) => println!("Confirmed transaction details not available"),
Err(err) => println!("Unable to get confirmed transaction details: {}", err),
}
println!();
}
match transaction_status.status {
Ok(_) => println!("Confirmed"),
Err(err) => println!("Transaction failed: {}", err),
}
Ok(())
}
pub async fn transaction_history(
address: &Pubkey,
mut limit: usize,
mut before: Option<Signature>,
verbose: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(true).await?;
while limit > 0 {
let results = bigtable
.get_confirmed_signatures_for_address(address, before.as_ref(), limit.min(1000))
.await?;
if results.is_empty() {
break;
}
before = Some(results.last().unwrap().signature);
assert!(limit >= results.len());
limit = limit.saturating_sub(results.len());
for result in results {
if verbose {
println!(
"{}, slot={}, memo=\"{}\", status={}",
result.signature,
result.slot,
result.memo.unwrap_or_else(|| "".to_string()),
match result.err {
None => "Confirmed".to_string(),
Some(err) => format!("Failed: {:?}", err),
}
);
} else {
println!("{}", result.signature);
}
}
}
Ok(())
}
pub trait BigTableSubCommand {
fn bigtable_subcommand(self) -> Self;
}
impl BigTableSubCommand for App<'_, '_> {
fn bigtable_subcommand(self) -> Self {
self.subcommand(
SubCommand::with_name("bigtable")
.about("Ledger data on a BigTable instance")
.setting(AppSettings::ArgRequiredElseHelp)
.subcommand(
SubCommand::with_name("upload")
.about("Upload the ledger to BigTable")
.arg(
Arg::with_name("starting_slot")
.long("starting-slot")
.validator(is_slot)
.value_name("SLOT")
.takes_value(true)
.index(1)
.help(
"Start uploading at this slot [default: first available slot]",
),
)
.arg(
Arg::with_name("ending_slot")
.long("ending-slot")
.validator(is_slot)
.value_name("SLOT")
.takes_value(true)
.index(2)
.help("Stop uploading at this slot [default: last available slot]"),
)
.arg(
Arg::with_name("allow_missing_metadata")
.long("allow-missing-metadata")
.takes_value(false)
.help("Don't panic if transaction metadata is missing"),
),
)
.subcommand(
SubCommand::with_name("first-available-block")
.about("Get the first available block in the storage"),
)
.subcommand(
SubCommand::with_name("blocks")
.about("Get a list of slots with confirmed blocks for the given range")
.arg(
Arg::with_name("starting_slot")
.long("starting-slot")
.validator(is_slot)
.value_name("SLOT")
.takes_value(true)
.index(1)
.required(true)
.default_value("0")
.help("Start listing at this slot"),
)
.arg(
Arg::with_name("limit")
.long("limit")
.validator(is_slot)
.value_name("LIMIT")
.takes_value(true)
.index(2)
.required(true)
.default_value("1000")
.help("Maximum number of slots to return"),
),
)
.subcommand(
SubCommand::with_name("block")
.about("Get a confirmed block")
.arg(
Arg::with_name("slot")
.long("slot")
.validator(is_slot)
.value_name("SLOT")
.takes_value(true)
.index(1)
.required(true),
),
)
.subcommand(
SubCommand::with_name("confirm")
.about("Confirm transaction by signature")
.arg(
Arg::with_name("signature")
.long("signature")
.value_name("TRANSACTION_SIGNATURE")
.takes_value(true)
.required(true)
.index(1)
.help("The transaction signature to confirm"),
)
.arg(
Arg::with_name("verbose")
.short("v")
.long("verbose")
.takes_value(false)
.help("Show additional information"),
),
)
.subcommand(
SubCommand::with_name("transaction-history")
.about(
"Show historical transactions affecting the given address \
from newest to oldest",
)
.arg(
Arg::with_name("address")
.index(1)
.value_name("ADDRESS")
.required(true)
.validator(is_valid_pubkey)
.help("Account address"),
)
.arg(
Arg::with_name("limit")
.long("limit")
.takes_value(true)
.value_name("LIMIT")
.validator(is_slot)
.index(2)
.default_value("18446744073709551615")
.help("Maximum number of transaction signatures to return"),
)
.arg(
Arg::with_name("before")
.long("before")
.value_name("TRANSACTION_SIGNATURE")
.takes_value(true)
.help("Start with the first signature older than this one"),
)
.arg(
Arg::with_name("verbose")
.short("v")
.long("verbose")
.takes_value(false)
.help("Show additional information"),
),
),
)
}
}
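// Example invocations of the subcommand tree above (slot values and the
// signature/address placeholders are hypothetical):
//
//     solana-ledger-tool bigtable upload 40000000 40001000 --allow-missing-metadata
//     solana-ledger-tool bigtable first-available-block
//     solana-ledger-tool bigtable blocks 40000000 10
//     solana-ledger-tool bigtable confirm <TRANSACTION_SIGNATURE> -v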
pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
let mut runtime = tokio::runtime::Runtime::new().unwrap();
let future = match matches.subcommand() {
("upload", Some(arg_matches)) => {
let starting_slot = value_t!(arg_matches, "starting_slot", Slot).unwrap_or(0);
let ending_slot = value_t!(arg_matches, "ending_slot", Slot).ok();
let allow_missing_metadata = arg_matches.is_present("allow_missing_metadata");
let blockstore =
crate::open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None);
runtime.block_on(upload(
blockstore,
starting_slot,
ending_slot,
allow_missing_metadata,
))
}
("first-available-block", Some(_arg_matches)) => runtime.block_on(first_available_block()),
("block", Some(arg_matches)) => {
let slot = value_t_or_exit!(arg_matches, "slot", Slot);
runtime.block_on(block(slot))
}
("blocks", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let limit = value_t_or_exit!(arg_matches, "limit", usize);
runtime.block_on(blocks(starting_slot, limit))
}
("confirm", Some(arg_matches)) => {
let signature = arg_matches
.value_of("signature")
.unwrap()
.parse()
.expect("Invalid signature");
let verbose = arg_matches.is_present("verbose");
runtime.block_on(confirm(&signature, verbose))
}
("transaction-history", Some(arg_matches)) => {
let address = pubkey_of(arg_matches, "address").unwrap();
let limit = value_t_or_exit!(arg_matches, "limit", usize);
let before = arg_matches
.value_of("before")
.map(|signature| signature.parse().expect("Invalid signature"));
let verbose = arg_matches.is_present("verbose");
runtime.block_on(transaction_history(&address, limit, before, verbose))
}
_ => unreachable!(),
};
future.unwrap_or_else(|err| {
eprintln!("{:?}", err);
exit(1);
});
}


@ -2,6 +2,7 @@ use clap::{
crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, Arg,
ArgMatches, SubCommand,
};
use log::*;
use regex::Regex;
use serde_json::json;
use solana_clap_utils::input_validators::{is_parsable, is_slot};
@ -43,7 +44,8 @@ use std::{
sync::Arc,
};
use log::*;
mod bigtable;
use bigtable::*;
#[derive(PartialEq)]
enum LedgerOutputMethod {
@ -805,6 +807,7 @@ fn main() {
.global(true)
.help("Use DIR for ledger location"),
)
.bigtable_subcommand()
.subcommand(
SubCommand::with_name("print")
.about("Print the ledger")
@ -1145,6 +1148,7 @@ fn main() {
.map(BlockstoreRecoveryMode::from);
match matches.subcommand() {
("bigtable", Some(arg_matches)) => bigtable_process_command(&ledger_path, arg_matches),
("print", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let num_slots = value_t!(arg_matches, "num_slots", Slot).ok();


@ -37,14 +37,15 @@ use solana_sdk::{
transaction::Transaction,
};
use solana_transaction_status::{
ConfirmedBlock, ConfirmedTransaction, EncodedTransaction, Rewards, TransactionStatusMeta,
TransactionWithStatusMeta, UiTransactionEncoding, UiTransactionStatusMeta,
ConfirmedBlock, ConfirmedTransaction, ConfirmedTransactionStatusWithSignature,
EncodedTransaction, Rewards, TransactionStatusMeta, TransactionWithStatusMeta,
UiTransactionEncoding, UiTransactionStatusMeta,
};
use solana_vote_program::{vote_instruction::VoteInstruction, vote_state::TIMESTAMP_SLOT_INTERVAL};
use std::{
cell::RefCell,
cmp,
collections::HashMap,
collections::{HashMap, HashSet},
fs,
io::{Error as IOError, ErrorKind},
path::{Path, PathBuf},
@ -1658,7 +1659,7 @@ impl Blockstore {
iterator
.map(|transaction| {
let signature = transaction.signatures[0];
let encoded_transaction = EncodedTransaction::encode(transaction, encoding.clone());
let encoded_transaction = EncodedTransaction::encode(transaction, encoding);
TransactionWithStatusMeta {
transaction: encoded_transaction,
meta: self
@ -1803,9 +1804,9 @@ impl Blockstore {
(transaction_status_cf_primary_index, signature, 0),
IteratorDirection::Forward,
))?;
for ((_, sig, slot), data) in index_iterator {
for ((i, sig, slot), data) in index_iterator {
counter += 1;
if sig != signature {
if i != transaction_status_cf_primary_index || sig != signature {
break;
}
if self.is_root(slot) {
@ -1841,8 +1842,9 @@ impl Blockstore {
("method", "get_confirmed_transaction".to_string(), String)
);
if let Some((slot, status)) = self.get_transaction_status(signature)? {
let transaction = self.find_transaction_in_slot(slot, signature)?
.expect("Transaction to exist in slot entries if it exists in statuses and hasn't been cleaned up");
let transaction = self
.find_transaction_in_slot(slot, signature)?
.ok_or(BlockstoreError::TransactionStatusSlotMismatch)?; // Should not happen
let encoding = encoding.unwrap_or(UiTransactionEncoding::Json);
let encoded_transaction = EncodedTransaction::encode(transaction, encoding);
Ok(Some(ConfirmedTransaction {
@ -1871,7 +1873,8 @@ impl Blockstore {
}
// Returns all cached signatures for an address, ordered by slot that the transaction was
// processed in
// processed in. Within each slot the transactions will be ordered by signature, and NOT by
// the order in which the transactions exist in the block
fn find_address_signatures(
&self,
pubkey: Pubkey,
@ -1903,6 +1906,30 @@ impl Blockstore {
Ok(signatures)
}
fn get_lowest_slot_for_address(&self, address: Pubkey) -> Result<Option<Slot>> {
let mut lowest_slot = None;
for transaction_status_cf_primary_index in 0..=1 {
let mut index_iterator = self.address_signatures_cf.iter(IteratorMode::From(
(
transaction_status_cf_primary_index,
address,
0,
Signature::default(),
),
IteratorDirection::Forward,
))?;
if let Some(((i, key_address, slot, _), _)) = index_iterator.next() {
if i == transaction_status_cf_primary_index
&& key_address == address
&& slot < lowest_slot.unwrap_or(Slot::MAX)
{
lowest_slot = Some(slot);
}
}
}
Ok(lowest_slot)
}
pub fn get_confirmed_signatures_for_address(
&self,
pubkey: Pubkey,
@ -1921,6 +1948,126 @@ impl Blockstore {
.map(|signatures| signatures.iter().map(|(_, signature)| *signature).collect())
}
pub fn get_confirmed_signatures_for_address2(
&self,
address: Pubkey,
highest_confirmed_root: Slot,
before: Option<Signature>,
limit: usize,
) -> Result<Vec<ConfirmedTransactionStatusWithSignature>> {
datapoint_info!(
"blockstore-rpc-api",
(
"method",
"get_confirmed_signatures_for_address2".to_string(),
String
)
);
// Figure the `slot` to start listing signatures at, based on the ledger location of the
// `before` signature if present. Also generate a HashSet of signatures that should
// be excluded from the results.
let (mut slot, mut excluded_signatures) = match before {
None => (highest_confirmed_root, None),
Some(before) => {
let transaction_status = self.get_transaction_status(before)?;
match transaction_status {
None => return Ok(vec![]),
Some((slot, _)) => {
let confirmed_block = self
.get_confirmed_block(slot, Some(UiTransactionEncoding::Binary))
.map_err(|err| {
BlockstoreError::IO(IOError::new(
ErrorKind::Other,
format!("Unable to get confirmed block: {}", err),
))
})?;
// Load all signatures for the block
let mut slot_signatures: Vec<_> = confirmed_block
.transactions
.iter()
.filter_map(|transaction_with_meta| {
if let Some(transaction) =
transaction_with_meta.transaction.decode()
{
transaction.signatures.into_iter().next()
} else {
None
}
})
.collect();
// Sort signatures as a way to ensure a stable ordering within a slot, as
// `self.find_address_signatures()` is ordered by signature and
// not by block ordering
slot_signatures.sort();
if let Some(pos) = slot_signatures.iter().position(|&x| x == before) {
slot_signatures.truncate(pos + 1);
}
(
slot,
Some(slot_signatures.into_iter().collect::<HashSet<_>>()),
)
}
}
}
};
// Fetch the list of signatures that affect the given address
let first_available_block = self.get_first_available_block()?;
let first_address_slot = self.get_lowest_slot_for_address(address)?;
if first_address_slot.is_none() {
return Ok(vec![]);
}
let lower_bound = cmp::max(first_available_block, first_address_slot.unwrap());
let mut address_signatures = vec![];
loop {
if address_signatures.len() >= limit {
address_signatures.truncate(limit);
break;
}
let mut signatures = self.find_address_signatures(address, slot, slot)?;
if let Some(excluded_signatures) = excluded_signatures.take() {
address_signatures.extend(
signatures
.into_iter()
.filter(|(_, signature)| !excluded_signatures.contains(&signature)),
)
} else {
address_signatures.append(&mut signatures);
}
excluded_signatures = None;
if slot == lower_bound {
break;
}
slot -= 1;
}
address_signatures.truncate(limit);
// Fill in the status information for each found transaction
let mut infos = vec![];
for (slot, signature) in address_signatures.into_iter() {
let transaction_status = self.get_transaction_status(signature)?;
let err = match transaction_status {
None => None,
Some((_slot, status)) => status.status.err(),
};
infos.push(ConfirmedTransactionStatusWithSignature {
signature,
slot,
err,
memo: None,
});
}
Ok(infos)
}
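// Pagination sketch (illustrative only): callers can walk a long history by
// feeding the last returned signature back in as `before`, the same idiom
// ledger-tool's transaction_history loop uses against the bigtable variant:
//
//     let mut before = None;
//     loop {
//         let page = blockstore.get_confirmed_signatures_for_address2(
//             address,
//             highest_confirmed_root,
//             before,
//             1000,
//         )?;
//         if page.is_empty() {
//             break;
//         }
//         before = Some(page.last().unwrap().signature);
//         // ... consume `page` ...
//     }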
pub fn read_rewards(&self, index: Slot) -> Result<Option<Rewards>> {
self.rewards_cf.get(index)
}
@ -5942,6 +6089,19 @@ pub mod tests {
}
}
#[test]
fn test_empty_transaction_status() {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore.set_roots(&[0]).unwrap();
assert_eq!(
blockstore
.get_confirmed_transaction(Signature::default(), None)
.unwrap(),
None
);
}
#[test]
fn test_get_confirmed_signatures_for_address() {
let blockstore_path = get_tmp_ledger_path!();
@ -6082,6 +6242,244 @@ pub mod tests {
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_get_lowest_slot_for_address() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let address = Pubkey::new_rand();
let address2 = Pubkey::new_rand();
let slot = 5;
// Add an additional record to ensure that existing or lower slots in entries for
// other addresses do not affect the return value
blockstore
.address_signatures_cf
.put(
(0, address2, slot, Signature::default()),
&AddressSignatureMeta { writeable: false },
)
.unwrap();
assert_eq!(
blockstore.get_lowest_slot_for_address(address).unwrap(),
None
);
let slot = 200;
blockstore
.address_signatures_cf
.put(
(0, address, slot, Signature::default()),
&AddressSignatureMeta { writeable: false },
)
.unwrap();
assert_eq!(
blockstore.get_lowest_slot_for_address(address).unwrap(),
Some(200)
);
blockstore
.address_signatures_cf
.put(
(1, address, slot, Signature::default()),
&AddressSignatureMeta { writeable: false },
)
.unwrap();
assert_eq!(
blockstore.get_lowest_slot_for_address(address).unwrap(),
Some(200)
);
let slot = 300;
blockstore
.address_signatures_cf
.put(
(1, address, slot, Signature::default()),
&AddressSignatureMeta { writeable: false },
)
.unwrap();
assert_eq!(
blockstore.get_lowest_slot_for_address(address).unwrap(),
Some(200)
);
let slot = 100;
blockstore
.address_signatures_cf
.put(
(1, address, slot, Signature::default()),
&AddressSignatureMeta { writeable: false },
)
.unwrap();
assert_eq!(
blockstore.get_lowest_slot_for_address(address).unwrap(),
Some(100)
);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_get_confirmed_signatures_for_address2() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
fn make_slot_entries_with_transaction_addresses(addresses: &[Pubkey]) -> Vec<Entry> {
let mut entries: Vec<Entry> = Vec::new();
for address in addresses {
let transaction = Transaction::new_with_compiled_instructions(
&[&Keypair::new()],
&[*address],
Hash::default(),
vec![Pubkey::new_rand()],
vec![CompiledInstruction::new(1, &(), vec![0])],
);
entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
let mut tick = create_ticks(1, 0, hash(&serialize(address).unwrap()));
entries.append(&mut tick);
}
entries
}
let address0 = Pubkey::new_rand();
let address1 = Pubkey::new_rand();
for slot in 2..=4 {
let entries = make_slot_entries_with_transaction_addresses(&[
address0, address1, address0, address1,
]);
let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
blockstore.insert_shreds(shreds, None, false).unwrap();
for entry in &entries {
for transaction in &entry.transactions {
assert_eq!(transaction.signatures.len(), 1);
blockstore
.write_transaction_status(
slot,
transaction.signatures[0],
transaction.message.account_keys.iter().collect(),
vec![],
&TransactionStatusMeta::default(),
)
.unwrap();
}
}
}
blockstore.set_roots(&[1, 2, 3, 4]).unwrap();
let highest_confirmed_root = 4;
// Fetch all signatures for address 0 at once...
let all0 = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
None,
usize::MAX,
)
.unwrap();
assert_eq!(all0.len(), 6);
// Fetch all signatures for address 1 at once...
let all1 = blockstore
.get_confirmed_signatures_for_address2(
address1,
highest_confirmed_root,
None,
usize::MAX,
)
.unwrap();
assert_eq!(all1.len(), 6);
assert!(all0 != all1);
// Fetch all signatures for address 0 individually
for i in 0..all0.len() {
let results = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
if i == 0 {
None
} else {
Some(all0[i - 1].signature)
},
1,
)
.unwrap();
assert_eq!(results.len(), 1);
assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
}
assert!(blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
Some(all0[all0.len() - 1].signature),
1,
)
.unwrap()
.is_empty());
// Fetch all signatures for address 0, three at a time
assert!(all0.len() % 3 == 0);
for i in (0..all0.len()).step_by(3) {
let results = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
if i == 0 {
None
} else {
Some(all0[i - 1].signature)
},
3,
)
.unwrap();
assert_eq!(results.len(), 3);
assert_eq!(results[0], all0[i]);
assert_eq!(results[1], all0[i + 1]);
assert_eq!(results[2], all0[i + 2]);
}
// Ensure that the signatures within a slot are ordered by signature
// (a current limitation of get_confirmed_signatures_for_address2())
for i in (0..all1.len()).step_by(2) {
let results = blockstore
.get_confirmed_signatures_for_address2(
address1,
highest_confirmed_root,
if i == 0 {
None
} else {
Some(all1[i - 1].signature)
},
2,
)
.unwrap();
assert_eq!(results.len(), 2);
assert_eq!(results[0].slot, results[1].slot);
assert!(results[0].signature <= results[1].signature);
assert_eq!(results[0], all1[i]);
assert_eq!(results[1], all1[i + 1]);
}
// A search for address0 with a `before` signature from address1 should also work
let results = blockstore
.get_confirmed_signatures_for_address2(
address0,
highest_confirmed_root,
Some(all1[0].signature),
usize::MAX,
)
.unwrap();
// The exact number of results returned is variable, based on the sort order of the
// random signatures that are generated
assert!(!results.is_empty());
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_get_last_hash() {
let mut entries: Vec<Entry> = vec![];


@ -58,6 +58,7 @@ pub enum BlockstoreError {
SlotCleanedUp,
UnpackError(#[from] UnpackError),
UnableToSetOpenFileDescriptorLimit,
TransactionStatusSlotMismatch,
}
pub type Result<T> = std::result::Result<T, BlockstoreError>;


@ -48,6 +48,9 @@ while [[ -n $1 ]]; do
elif [[ $1 = --enable-rpc-transaction-history ]]; then
args+=("$1")
shift
elif [[ $1 = --enable-rpc-bigtable-ledger-storage ]]; then
args+=("$1")
shift
elif [[ $1 = --skip-poh-verify ]]; then
args+=("$1")
shift


@ -1885,6 +1885,7 @@ dependencies = [
"bv",
"byteorder 1.3.4",
"chrono",
"curve25519-dalek",
"ed25519-dalek",
"generic-array 0.14.3",
"hex",


@ -30,6 +30,10 @@ extern uint64_t entrypoint(const uint8_t *input) {
return ERROR_INVALID_ARGUMENT;
}
uint8_t nonce1 = params.data[1];
uint8_t nonce2 = params.data[2];
uint8_t nonce3 = params.data[3];
switch (params.data[0]) {
case TEST_SUCCESS: {
sol_log("Call system program");
@ -81,6 +85,18 @@ extern uint64_t entrypoint(const uint8_t *input) {
sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)));
}
sol_log("Test create_program_address");
{
uint8_t seed1[] = {'Y', 'o', 'u', ' ', 'p', 'a', 's', 's',
' ', 'b', 'u', 't', 't', 'e', 'r'};
const SolSignerSeed seeds1[] = {{seed1, SOL_ARRAY_SIZE(seed1)},
{&nonce1, 1}};
SolPubkey address;
sol_assert(SUCCESS == sol_create_program_address(seeds1, SOL_ARRAY_SIZE(seeds1),
params.program_id, &address));
sol_assert(SolPubkey_same(&address, accounts[DERIVED_KEY1_INDEX].key));
}
sol_log("Test derived signers");
{
sol_assert(!accounts[DERIVED_KEY1_INDEX].is_signer);
@ -92,19 +108,15 @@ extern uint64_t entrypoint(const uint8_t *input) {
{accounts[DERIVED_KEY1_INDEX].key, true, true},
{accounts[DERIVED_KEY2_INDEX].key, true, false},
{accounts[DERIVED_KEY3_INDEX].key, false, false}};
uint8_t data[] = {TEST_DERIVED_SIGNERS};
uint8_t data[] = {TEST_DERIVED_SIGNERS, nonce2, nonce3};
const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key,
arguments, SOL_ARRAY_SIZE(arguments),
data, SOL_ARRAY_SIZE(data)};
uint8_t seed1[] = {'Y', 'o', 'u', ' ', 'p', 'a', 's', 's',
' ', 'b', 'u', 't', 't', 'e', 'r'};
uint8_t seed2[] = {'L', 'i', 'l', '\''};
uint8_t seed3[] = {'B', 'i', 't', 's'};
const SolSignerSeed seeds1[] = {{seed1, SOL_ARRAY_SIZE(seed1)}};
const SolSignerSeed seeds2[] = {{seed2, SOL_ARRAY_SIZE(seed2)},
{seed3, SOL_ARRAY_SIZE(seed3)}};
const SolSignerSeeds signers_seeds[] = {{seeds1, SOL_ARRAY_SIZE(seeds1)},
{seeds2, SOL_ARRAY_SIZE(seeds2)}};
const SolSignerSeed seeds1[] = {{seed1, SOL_ARRAY_SIZE(seed1)},
{&nonce1, 1}};
const SolSignerSeeds signers_seeds[] = {{seeds1, SOL_ARRAY_SIZE(seeds1)}};
sol_assert(SUCCESS == sol_invoke_signed(&instruction, accounts,
SOL_ARRAY_SIZE(accounts),
signers_seeds,


@ -92,6 +92,9 @@ extern uint64_t entrypoint(const uint8_t *input) {
sol_assert(!accounts[DERIVED_KEY2_INDEX].is_signer);
sol_assert(!accounts[DERIVED_KEY3_INDEX].is_signer);
uint8_t nonce2 = params.data[1];
uint8_t nonce3 = params.data[2];
SolAccountMeta arguments[] = {
{accounts[DERIVED_KEY1_INDEX].key, true, false},
{accounts[DERIVED_KEY2_INDEX].key, true, true},
@ -103,9 +106,11 @@ extern uint64_t entrypoint(const uint8_t *input) {
uint8_t seed1[] = {'L', 'i', 'l', '\''};
uint8_t seed2[] = {'B', 'i', 't', 's'};
const SolSignerSeed seeds1[] = {{seed1, SOL_ARRAY_SIZE(seed1)},
{seed2, SOL_ARRAY_SIZE(seed2)}};
{seed2, SOL_ARRAY_SIZE(seed2)},
{&nonce2, 1}};
const SolSignerSeed seeds2[] = {
{(uint8_t *)accounts[DERIVED_KEY2_INDEX].key, SIZE_PUBKEY}};
{(uint8_t *)accounts[DERIVED_KEY2_INDEX].key, SIZE_PUBKEY},
{&nonce3, 1}};
const SolSignerSeeds signers_seeds[] = {{seeds1, SOL_ARRAY_SIZE(seeds1)},
{seeds2, SOL_ARRAY_SIZE(seeds2)}};


@ -10,7 +10,7 @@ use solana_sdk::{
entrypoint,
entrypoint::ProgramResult,
info,
program::{invoke, invoke_signed},
program::{create_program_address, invoke, invoke_signed},
program_error::ProgramError,
pubkey::Pubkey,
system_instruction,
@ -34,12 +34,16 @@ const FROM_INDEX: usize = 10;
entrypoint!(process_instruction);
fn process_instruction(
_program_id: &Pubkey,
program_id: &Pubkey,
accounts: &[AccountInfo],
instruction_data: &[u8],
) -> ProgramResult {
info!("invoke Rust program");
let nonce1 = instruction_data[1];
let nonce2 = instruction_data[2];
let nonce3 = instruction_data[3];
match instruction_data[0] {
TEST_SUCCESS => {
info!("Call system program");
@ -91,6 +95,12 @@ fn process_instruction(
);
}
info!("Test create_program_address");
{
let address = create_program_address(&[b"You pass butter", &[nonce1]], program_id)?;
assert_eq!(&address, accounts[DERIVED_KEY1_INDEX].key);
}
info!("Test derived signers");
{
assert!(!accounts[DERIVED_KEY1_INDEX].is_signer);
@ -105,12 +115,12 @@ fn process_instruction(
(accounts[DERIVED_KEY2_INDEX].key, true, false),
(accounts[DERIVED_KEY3_INDEX].key, false, false),
],
vec![TEST_DERIVED_SIGNERS],
vec![TEST_DERIVED_SIGNERS, nonce2, nonce3],
);
invoke_signed(
&invoked_instruction,
accounts,
&[&[b"You pass butter"], &[b"Lil'", b"Bits"]],
&[&[b"You pass butter", &[nonce1]]],
)?;
}


@ -120,6 +120,8 @@ fn process_instruction(
assert!(!accounts[DERIVED_KEY2_INDEX].is_signer);
assert!(!accounts[DERIVED_KEY3_INDEX].is_signer);
let nonce2 = instruction_data[1];
let nonce3 = instruction_data[2];
let invoked_instruction = create_instruction(
*accounts[INVOKED_PROGRAM_INDEX].key,
&[
@ -133,8 +135,8 @@ fn process_instruction(
&invoked_instruction,
accounts,
&[
&[b"Lil'", b"Bits"],
&[accounts[DERIVED_KEY2_INDEX].key.as_ref()],
&[b"Lil'", b"Bits", &[nonce2]],
&[accounts[DERIVED_KEY2_INDEX].key.as_ref(), &[nonce3]],
],
)?;
}


@ -358,13 +358,12 @@ mod bpf {
let account = Account::new(43, 0, &solana_sdk::system_program::id());
bank.store_account(&from_keypair.pubkey(), &account);
let derived_key1 =
Pubkey::create_program_address(&[b"You pass butter"], &invoke_program_id).unwrap();
let derived_key2 =
Pubkey::create_program_address(&[b"Lil'", b"Bits"], &invoked_program_id).unwrap();
let derived_key3 =
Pubkey::create_program_address(&[derived_key2.as_ref()], &invoked_program_id)
.unwrap();
let (derived_key1, nonce1) =
Pubkey::find_program_address(&[b"You pass butter"], &invoke_program_id);
let (derived_key2, nonce2) =
Pubkey::find_program_address(&[b"Lil'", b"Bits"], &invoked_program_id);
let (derived_key3, nonce3) =
Pubkey::find_program_address(&[derived_key2.as_ref()], &invoked_program_id);
let mint_pubkey = mint_keypair.pubkey();
let account_metas = vec![
@ -383,8 +382,11 @@ mod bpf {
// success cases
let instruction =
Instruction::new(invoke_program_id, &TEST_SUCCESS, account_metas.clone());
let instruction = Instruction::new(
invoke_program_id,
&[TEST_SUCCESS, nonce1, nonce2, nonce3],
account_metas.clone(),
);
let message = Message::new(&[instruction], Some(&mint_pubkey));
assert!(bank_client
.send_and_confirm_message(


@ -78,7 +78,6 @@ pub fn register_syscalls<'a>(
vm.register_syscall_ex("abort", syscall_abort)?;
vm.register_syscall_ex("sol_panic_", syscall_sol_panic)?;
vm.register_syscall_with_context_ex(
"sol_log_",
Box::new(SyscallLog {
@ -91,6 +90,9 @@ pub fn register_syscalls<'a>(
logger: invoke_context.get_logger(),
}),
)?;
if invoke_context.is_cross_program_supported() {
vm.register_syscall_ex("sol_create_program_address", syscall_create_program_address)?;
}
// Cross-program invocation syscalls
@ -331,6 +333,36 @@ impl SyscallObject<BPFError> for SyscallSolAllocFree {
}
}
/// Create a program address
pub fn syscall_create_program_address(
seeds_addr: u64,
seeds_len: u64,
program_id_addr: u64,
address_addr: u64,
_arg5: u64,
ro_regions: &[MemoryRegion],
rw_regions: &[MemoryRegion],
) -> Result<u64, EbpfError<BPFError>> {
let untranslated_seeds = translate_slice!(&[&str], seeds_addr, seeds_len, ro_regions)?;
let seeds = untranslated_seeds
.iter()
.map(|untranslated_seed| {
translate_slice!(
u8,
untranslated_seed.as_ptr(),
untranslated_seed.len(),
ro_regions
)
})
.collect::<Result<Vec<_>, EbpfError<BPFError>>>()?;
let program_id = translate_type!(Pubkey, program_id_addr, ro_regions)?;
let new_address =
Pubkey::create_program_address(&seeds, program_id).map_err(SyscallError::BadSeeds)?;
let address = translate_slice_mut!(u8, address_addr, 32, rw_regions)?;
address.copy_from_slice(new_address.as_ref());
Ok(0)
}
// Cross-program invocation syscalls
pub type TranslatedAccounts<'a> = (Vec<Rc<RefCell<Account>>>, Vec<(&'a mut u64, &'a mut [u8])>);
@ -815,6 +847,12 @@ fn call<'a>(
let account = account.borrow();
if message.is_writable(i) && !account.executable {
*lamport_ref = account.lamports;
if data.len() != account.data.len() {
return Err(SyscallError::InstructionError(
InstructionError::AccountDataSizeChanged,
)
.into());
}
data.clone_from_slice(&account.data);
}
}


@ -76,7 +76,7 @@ pub const SECONDS_PER_YEAR: f64 = 365.25 * 24.0 * 60.0 * 60.0;
pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5;
type BankStatusCache = StatusCache<Result<()>>;
#[frozen_abi(digest = "BHtoJzwGJ1seQ2gZmtPSLLgdvq3gRZMj5mpUJsX4wGHT")]
#[frozen_abi(digest = "3bFpd1M1YHHKFASfjUU5L9dUkg87TKuMzwUoUebwa8Pu")]
pub type BankSlotDelta = SlotDelta<Result<()>>;
type TransactionAccountRefCells = Vec<Rc<RefCell<Account>>>;
type TransactionLoaderRefCells = Vec<Vec<(Pubkey, RefCell<Account>)>>;


@ -18,6 +18,7 @@ default = [
"assert_matches",
"byteorder",
"chrono",
"curve25519-dalek",
"generic-array",
"memmap",
"rand",
@ -35,6 +36,7 @@ bs58 = "0.3.1"
bv = { version = "0.11.1", features = ["serde"] }
byteorder = { version = "1.3.4", optional = true }
chrono = { version = "0.4", optional = true }
curve25519-dalek = { version = "2.1.0", optional = true }
generic-array = { version = "0.14.3", default-features = false, features = ["serde", "more_lengths"], optional = true }
hex = "0.4.2"
hmac = "0.7.0"


@ -111,6 +111,8 @@ static_assert(sizeof(uint64_t) == 8);
#define ERROR_ACCOUNT_BORROW_FAILED TO_BUILTIN(12)
/** The length of the seed is too long for address generation */
#define MAX_SEED_LENGTH_EXCEEDED TO_BUILTIN(13)
/** Provided seeds do not result in a valid address */
#define INVALID_SEEDS TO_BUILTIN(14)
/**
* Boolean type
@ -390,12 +392,29 @@ typedef struct {
uint64_t len; /** Number of seeds */
} SolSignerSeeds;
/*
* Create a program address
*
* @param seeds Seed strings used to sign program accounts
* @param seeds_len Length of the seeds array
* @param program_id Program id of the signer
* @param address Program address created, filled on return
*/
static uint64_t sol_create_program_address(
const SolSignerSeed *seeds,
int seeds_len,
const SolPubkey *program_id,
const SolPubkey *address
);
/**
* Cross-program invocation
* * @{
*/
/*
* Invoke another program and sign for some of the keys
*
* @param instruction Instruction to process
* @param account_infos Accounts used by instruction
* @param account_infos_len Length of account_infos array
@ -426,9 +445,11 @@ static uint64_t sol_invoke_signed(
);
}
/*
* @param instruction Instruction to process
* @param account_infos Accounts used by instruction
* @param account_infos_len Length of account_infos array
* Invoke another program
*
* @param instruction Instruction to process
* @param account_infos Accounts used by instruction
* @param account_infos_len Length of account_infos array
*/
static uint64_t sol_invoke(
const SolInstruction *instruction,


@ -155,6 +155,10 @@ pub enum InstructionError {
/// Length of the seed is too long for address generation
#[error("Length of the seed is too long for address generation")]
MaxSeedLengthExceeded,
/// Provided seeds do not result in a valid address
#[error("Provided seeds do not result in a valid address")]
InvalidSeeds,
}
impl InstructionError {


@ -2,9 +2,39 @@
use crate::{
account_info::AccountInfo, entrypoint::ProgramResult, entrypoint::SUCCESS,
instruction::Instruction,
instruction::Instruction, program_error::ProgramError, pubkey::Pubkey,
};
pub fn create_program_address(
seeds: &[&[u8]],
program_id: &Pubkey,
) -> Result<Pubkey, ProgramError> {
let bytes = [0u8; 32];
let result = unsafe {
sol_create_program_address(
seeds as *const _ as *const u8,
seeds.len() as u64,
program_id as *const _ as *const u8,
&bytes as *const _ as *const u8,
)
};
match result {
SUCCESS => Ok(Pubkey::new(&bytes)),
_ => Err(result.into()),
}
}
extern "C" {
fn sol_create_program_address(
seeds_addr: *const u8,
seeds_len: u64,
program_id_addr: *const u8,
address_bytes_addr: *const u8,
) -> u64;
}
/// Invoke a cross-program instruction
pub fn invoke(instruction: &Instruction, account_infos: &[AccountInfo]) -> ProgramResult {
invoke_signed(instruction, account_infos, &[])
@ -30,7 +60,6 @@ pub fn invoke_signed(
_ => Err(result.into()),
}
}
extern "C" {
fn sol_invoke_signed_rust(
instruction_addr: *const u8,


@ -40,6 +40,8 @@ pub enum ProgramError {
AccountBorrowFailed,
#[error("Length of the seed is too long for address generation")]
MaxSeedLengthExceeded,
#[error("Provided seeds do not result in a valid address")]
InvalidSeeds,
}
pub trait PrintProgramError {
@ -73,6 +75,7 @@ impl PrintProgramError for ProgramError {
Self::NotEnoughAccountKeys => info!("Error: NotEnoughAccountKeys"),
Self::AccountBorrowFailed => info!("Error: AccountBorrowFailed"),
Self::MaxSeedLengthExceeded => info!("Error: MaxSeedLengthExceeded"),
Self::InvalidSeeds => info!("Error: InvalidSeeds"),
}
}
}
@ -98,6 +101,7 @@ const UNINITIALIZED_ACCOUNT: u64 = to_builtin!(10);
const NOT_ENOUGH_ACCOUNT_KEYS: u64 = to_builtin!(11);
const ACCOUNT_BORROW_FAILED: u64 = to_builtin!(12);
const MAX_SEED_LENGTH_EXCEEDED: u64 = to_builtin!(13);
const INVALID_SEEDS: u64 = to_builtin!(14);
impl From<ProgramError> for u64 {
fn from(error: ProgramError) -> Self {
@ -114,6 +118,8 @@ impl From<ProgramError> for u64 {
ProgramError::NotEnoughAccountKeys => NOT_ENOUGH_ACCOUNT_KEYS,
ProgramError::AccountBorrowFailed => ACCOUNT_BORROW_FAILED,
ProgramError::MaxSeedLengthExceeded => MAX_SEED_LENGTH_EXCEEDED,
ProgramError::InvalidSeeds => INVALID_SEEDS,
ProgramError::Custom(error) => {
if error == 0 {
CUSTOM_ZERO
@ -140,6 +146,7 @@ impl From<u64> for ProgramError {
NOT_ENOUGH_ACCOUNT_KEYS => ProgramError::NotEnoughAccountKeys,
ACCOUNT_BORROW_FAILED => ProgramError::AccountBorrowFailed,
MAX_SEED_LENGTH_EXCEEDED => ProgramError::MaxSeedLengthExceeded,
INVALID_SEEDS => ProgramError::InvalidSeeds,
CUSTOM_ZERO => ProgramError::Custom(0),
_ => ProgramError::Custom(error as u32),
}
@ -189,6 +196,7 @@ where
NOT_ENOUGH_ACCOUNT_KEYS => InstructionError::NotEnoughAccountKeys,
ACCOUNT_BORROW_FAILED => InstructionError::AccountBorrowFailed,
MAX_SEED_LENGTH_EXCEEDED => InstructionError::MaxSeedLengthExceeded,
INVALID_SEEDS => InstructionError::InvalidSeeds,
_ => {
// A valid custom error has no bits set in the upper 32
if error >> BUILTIN_BIT_SHIFT == 0 {
@ -205,6 +213,7 @@ impl From<PubkeyError> for ProgramError {
fn from(error: PubkeyError) -> Self {
match error {
PubkeyError::MaxSeedLengthExceeded => ProgramError::MaxSeedLengthExceeded,
PubkeyError::InvalidSeeds => ProgramError::InvalidSeeds,
}
}
}


@ -1,7 +1,6 @@
use crate::{
decode_error::DecodeError,
hash::{hash, hashv, Hasher},
};
#[cfg(not(feature = "program"))]
use crate::hash::Hasher;
use crate::{decode_error::DecodeError, hash::hashv};
use num_derive::{FromPrimitive, ToPrimitive};
#[cfg(not(feature = "program"))]
use std::error;
@ -18,6 +17,8 @@ pub enum PubkeyError {
/// Length of the seed is too long for address generation
#[error("Length of the seed is too long for address generation")]
MaxSeedLengthExceeded,
#[error("Provided seeds do not result in a valid address")]
InvalidSeeds,
}
impl<T> DecodeError<T> for PubkeyError {
fn type_of() -> &'static str {
@ -87,6 +88,9 @@ impl Pubkey {
))
}
/// Create a program address; a valid program address must not lie on the
/// ed25519 curve
#[cfg(not(feature = "program"))]
pub fn create_program_address(
seeds: &[&[u8]],
program_id: &Pubkey,
@ -99,8 +103,34 @@ impl Pubkey {
hasher.hash(seed);
}
hasher.hashv(&[program_id.as_ref(), "ProgramDerivedAddress".as_ref()]);
let hash = hasher.result();
Ok(Pubkey::new(hash(hasher.result().as_ref()).as_ref()))
if curve25519_dalek::edwards::CompressedEdwardsY::from_slice(hash.as_ref())
.decompress()
.is_some()
{
return Err(PubkeyError::InvalidSeeds);
}
Ok(Pubkey::new(hash.as_ref()))
}
/// Find a valid program address and its corresponding nonce, which must be passed
/// as an additional seed when calling `create_program_address`
#[cfg(not(feature = "program"))]
pub fn find_program_address(seeds: &[&[u8]], program_id: &Pubkey) -> (Pubkey, u8) {
let mut nonce = [255];
for _ in 0..std::u8::MAX {
{
let mut seeds_with_nonce = seeds.to_vec();
seeds_with_nonce.push(&nonce);
if let Ok(address) = Self::create_program_address(&seeds_with_nonce, program_id) {
return (address, nonce[0]);
}
}
nonce[0] -= 1;
}
panic!("Unable to find a viable program address nonce");
}
#[cfg(not(feature = "program"))]
@ -259,37 +289,73 @@ mod tests {
Pubkey::create_program_address(&[b"short_seed", exceeded_seed], &program_id),
Err(PubkeyError::MaxSeedLengthExceeded)
);
assert!(Pubkey::create_program_address(&[max_seed], &Pubkey::new_rand()).is_ok());
assert!(Pubkey::create_program_address(&[max_seed], &program_id).is_ok());
assert_eq!(
Pubkey::create_program_address(&[b""], &program_id),
Ok("CsdSsqp6Upkh2qajhZMBM8xT4GAyDNSmcV37g4pN8rsc"
Pubkey::create_program_address(&[b"", &[1]], &program_id),
Ok("3gF2KMe9KiC6FNVBmfg9i267aMPvK37FewCip4eGBFcT"
.parse()
.unwrap())
);
assert_eq!(
Pubkey::create_program_address(&["".as_ref()], &program_id),
Ok("A8mYnN8Pfx7Nn6f8RoQgsPNtAGAWmmKSBCDfyDvE6sXF"
Ok("7ytmC1nT1xY4RfxCV2ZgyA7UakC93do5ZdyhdF3EtPj7"
.parse()
.unwrap())
);
assert_eq!(
Pubkey::create_program_address(&[b"Talking", b"Squirrels"], &program_id),
Ok("CawYq8Rmj4JRR992wVnGEFUjMEkmtmcFgEL4iS1qPczu"
Ok("HwRVBufQ4haG5XSgpspwKtNd3PC9GM9m1196uJW36vds"
.parse()
.unwrap())
);
assert_eq!(
Pubkey::create_program_address(&[public_key.as_ref()], &program_id),
Ok("4ak7qJacCKMAGP8xJtDkg2VYZh5QKExa71ijMDjZGQyb"
Ok("GUs5qLUfsEHkcMB9T38vjr18ypEhRuNWiePW2LoK4E3K"
.parse()
.unwrap())
);
assert_ne!(
Pubkey::create_program_address(&[b"Talking", b"Squirrels"], &program_id),
Pubkey::create_program_address(&[b"Talking"], &program_id),
Pubkey::create_program_address(&[b"Talking", b"Squirrels"], &program_id).unwrap(),
Pubkey::create_program_address(&[b"Talking"], &program_id).unwrap(),
);
}
#[test]
fn test_pubkey_off_curve() {
// try a bunch of random inputs; all successfully generated program
// addresses must land off the curve and be unique
let mut addresses = vec![];
for _ in 0..1_000 {
let program_id = Pubkey::new_rand();
let bytes1 = rand::random::<[u8; 10]>();
let bytes2 = rand::random::<[u8; 32]>();
if let Ok(program_address) =
Pubkey::create_program_address(&[&bytes1, &bytes2], &program_id)
{
let is_on_curve = curve25519_dalek::edwards::CompressedEdwardsY::from_slice(
&program_address.to_bytes(),
)
.decompress()
.is_some();
assert!(!is_on_curve);
assert!(!addresses.contains(&program_address));
addresses.push(program_address);
}
}
}
#[test]
fn test_find_program_address() {
for _ in 0..1_000 {
let program_id = Pubkey::new_rand();
let (address, nonce) = Pubkey::find_program_address(&[b"Lil'", b"Bits"], &program_id);
assert_eq!(
address,
Pubkey::create_program_address(&[b"Lil'", b"Bits", &[nonce]], &program_id).unwrap()
);
}
}
#[test]
fn test_read_write_pubkey() -> Result<(), Box<dyn error::Error>> {
let filename = "test_pubkey.json";


@ -0,0 +1,36 @@
[package]
name = "solana-storage-bigtable"
version = "1.3.0"
description = "Solana Storage BigTable"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
backoff = {version="0.2.1", features = ["tokio"]}
bincode = "1.2.1"
bzip2 = "0.3.3"
enum-iterator = "0.6.0"
flate2 = "1.0.14"
goauth = "0.7.2"
log = "0.4.8"
prost = "0.6.1"
prost-types = "0.6.1"
serde = "1.0.112"
serde_derive = "1.0.103"
smpl_jwt = "0.5.0"
solana-sdk = { path = "../sdk", version = "1.1.20" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.20" }
thiserror = "1.0"
futures = "0.3.5"
tonic = {version="0.3.0", features = ["tls", "transport"]}
zstd = "0.5.1"
[lib]
crate-type = ["lib"]
name = "solana_storage_bigtable"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@ -0,0 +1,22 @@
## BigTable Setup
### Development Environment
The Cloud BigTable emulator can be used during development/test. See
https://cloud.google.com/bigtable/docs/emulator for general setup information.
Process:
1. Run `gcloud beta emulators bigtable start` in the background
2. Run `$(gcloud beta emulators bigtable env-init)` to establish the `BIGTABLE_EMULATOR_HOST` environment variable
3. Run `./init-bigtable.sh` to configure the emulator
4. Develop/test
### Production Environment
Export the standard `GOOGLE_APPLICATION_CREDENTIALS` environment variable, pointing
at your service account credentials. The project should contain a BigTable instance
called `solana-ledger` that has been initialized by running the `./init-bigtable.sh` script.
Depending on what operation mode is required, either the
`https://www.googleapis.com/auth/bigtable.data` or
`https://www.googleapis.com/auth/bigtable.data.readonly` OAuth scope will be
requested using the provided credentials.
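To sanity-check connectivity from Rust, a minimal read-only probe using this
crate's API (the same calls `solana-ledger-tool bigtable` makes; error handling
elided, async context assumed) looks like:

    let bigtable = solana_storage_bigtable::LedgerStorage::new(true).await?;
    match bigtable.get_first_available_block().await? {
        Some(slot) => println!("first available block: {}", slot),
        None => println!("no blocks in storage yet"),
    }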


@ -0,0 +1,2 @@
googleapis/
target/

storage-bigtable/build-proto/Cargo.lock

@ -0,0 +1,340 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "anyhow"
version = "1.0.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85bb70cc08ec97ca5450e6eba421deeea5f172c0fc61f78b5357b2a8e8be195f"
[[package]]
name = "autocfg"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
[[package]]
name = "bytes"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38"
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "either"
version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
[[package]]
name = "fixedbitset"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d"
[[package]]
name = "getrandom"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "hashbrown"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34f595585f103464d8d2f6e9864682d74c1601fed5e07d62b1c9058dba8246fb"
dependencies = [
"autocfg",
]
[[package]]
name = "heck"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205"
dependencies = [
"unicode-segmentation",
]
[[package]]
name = "indexmap"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b88cd59ee5f71fea89a62248fc8f387d44400cefe05ef548466d61ced9029a7"
dependencies = [
"autocfg",
"hashbrown",
]
[[package]]
name = "itertools"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484"
dependencies = [
"either",
]
[[package]]
name = "libc"
version = "0.2.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd7d4bd64732af4bf3a67f367c27df8520ad7e230c5817b8ff485864d80242b9"
[[package]]
name = "log"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
dependencies = [
"cfg-if",
]
[[package]]
name = "multimap"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce"
[[package]]
name = "petgraph"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7"
dependencies = [
"fixedbitset",
"indexmap",
]
[[package]]
name = "ppv-lite86"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea"
[[package]]
name = "proc-macro2"
version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12"
dependencies = [
"unicode-xid",
]
[[package]]
name = "prost"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212"
dependencies = [
"bytes",
"prost-derive",
]
[[package]]
name = "prost-build"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26"
dependencies = [
"bytes",
"heck",
"itertools",
"log",
"multimap",
"petgraph",
"prost",
"prost-types",
"tempfile",
"which",
]
[[package]]
name = "prost-derive"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72"
dependencies = [
"anyhow",
"itertools",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "prost-types"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa"
dependencies = [
"bytes",
"prost",
]
[[package]]
name = "proto"
version = "1.1.20"
dependencies = [
"tonic-build",
]
[[package]]
name = "quote"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
dependencies = [
"getrandom",
"libc",
"rand_chacha",
"rand_core",
"rand_hc",
]
[[package]]
name = "rand_chacha"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
dependencies = [
"getrandom",
]
[[package]]
name = "rand_hc"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
"rand_core",
]
[[package]]
name = "redox_syscall"
version = "0.1.57"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
[[package]]
name = "remove_dir_all"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
dependencies = [
"winapi",
]
[[package]]
name = "syn"
version = "1.0.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "936cae2873c940d92e697597c5eee105fb570cd5689c695806f672883653349b"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "tempfile"
version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
dependencies = [
"cfg-if",
"libc",
"rand",
"redox_syscall",
"remove_dir_all",
"winapi",
]
[[package]]
name = "tonic-build"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71d8d21cb568e802d77055ab7fcd43f0992206de5028de95c8d3a41118d32e8e"
dependencies = [
"proc-macro2",
"prost-build",
"quote",
"syn",
]
[[package]]
name = "unicode-segmentation"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0"
[[package]]
name = "unicode-xid"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
[[package]]
name = "wasi"
version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]]
name = "which"
version = "3.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724"
dependencies = [
"libc",
]
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"


@ -0,0 +1,15 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
description = "Blockchain, Rebuilt for Scale"
edition = "2018"
homepage = "https://solana.com/"
license = "Apache-2.0"
name = "proto"
publish = false
repository = "https://github.com/solana-labs/solana"
version = "1.1.20"
[workspace]
[dependencies]
tonic-build = "0.2.0"


@ -0,0 +1,2 @@
Helper project to build Rust bindings for BigTable, to avoid requiring all
Solana developers to have protoc installed.


@ -0,0 +1,10 @@
#!/usr/bin/env bash
set -ex
cd "$(dirname "$0")"
if [[ ! -d googleapis ]]; then
git clone https://github.com/googleapis/googleapis.git
fi
exec cargo run


@ -0,0 +1,19 @@
fn main() -> Result<(), std::io::Error> {
let manifest_dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let out_dir = manifest_dir.join("../proto");
let googleapis = manifest_dir.join("googleapis");
println!("Google API directory: {}", googleapis.display());
println!("output directory: {}", out_dir.display());
tonic_build::configure()
.build_client(true)
.build_server(false)
.format(true)
.out_dir(&out_dir)
.compile(
&[googleapis.join("google/bigtable/v2/bigtable.proto")],
&[googleapis],
)
}


@ -0,0 +1,27 @@
#!/usr/bin/env bash
#
# Configures a BigTable instance with the expected tables
#
set -e
instance=solana-ledger
cbt=(
cbt
-instance
"$instance"
)
if [[ -n $BIGTABLE_EMULATOR_HOST ]]; then
cbt+=(-project emulator)
fi
for table in blocks tx tx-by-addr; do
(
set -x
"${cbt[@]}" createtable $table
"${cbt[@]}" createfamily $table x
"${cbt[@]}" setgcpolicy $table x maxversions=1
"${cbt[@]}" setgcpolicy $table x maxage=360d
)
done


@ -0,0 +1,632 @@
/// Defines the HTTP configuration for an API service. It contains a list of
/// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
/// to one or more HTTP REST API methods.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Http {
/// A list of HTTP configuration rules that apply to individual API methods.
///
/// **NOTE:** All service configuration rules follow "last one wins" order.
#[prost(message, repeated, tag = "1")]
pub rules: ::std::vec::Vec<HttpRule>,
/// When set to true, URL path parameters will be fully URI-decoded except in
/// cases of single segment matches in reserved expansion, where "%2F" will be
/// left encoded.
///
/// The default behavior is to not decode RFC 6570 reserved characters in multi
/// segment matches.
#[prost(bool, tag = "2")]
pub fully_decode_reserved_expansion: bool,
}
/// # gRPC Transcoding
///
/// gRPC Transcoding is a feature for mapping between a gRPC method and one or
/// more HTTP REST endpoints. It allows developers to build a single API service
/// that supports both gRPC APIs and REST APIs. Many systems, including [Google
/// APIs](https://github.com/googleapis/googleapis),
/// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC
/// Gateway](https://github.com/grpc-ecosystem/grpc-gateway),
/// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature
/// and use it for large scale production services.
///
/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies
/// how different portions of the gRPC request message are mapped to the URL
/// path, URL query parameters, and HTTP request body. It also controls how the
/// gRPC response message is mapped to the HTTP response body. `HttpRule` is
/// typically specified as an `google.api.http` annotation on the gRPC method.
///
/// Each mapping specifies a URL path template and an HTTP method. The path
/// template may refer to one or more fields in the gRPC request message, as long
/// as each field is a non-repeated field with a primitive (non-message) type.
/// The path template controls how fields of the request message are mapped to
/// the URL path.
///
/// Example:
///
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get: "/v1/{name=messages/*}"
/// };
/// }
/// }
/// message GetMessageRequest {
/// string name = 1; // Mapped to URL path.
/// }
/// message Message {
/// string text = 1; // The resource content.
/// }
///
/// This enables an HTTP REST to gRPC mapping as below:
///
/// HTTP | gRPC
/// -----|-----
/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")`
///
/// Any fields in the request message which are not bound by the path template
/// automatically become HTTP query parameters if there is no HTTP request body.
/// For example:
///
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get:"/v1/messages/{message_id}"
/// };
/// }
/// }
/// message GetMessageRequest {
/// message SubMessage {
/// string subfield = 1;
/// }
/// string message_id = 1; // Mapped to URL path.
/// int64 revision = 2; // Mapped to URL query parameter `revision`.
/// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.
/// }
///
/// This enables a HTTP JSON to RPC mapping as below:
///
/// HTTP | gRPC
/// -----|-----
/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
/// "foo"))`
///
/// Note that fields which are mapped to URL query parameters must have a
/// primitive type or a repeated primitive type or a non-repeated message type.
/// In the case of a repeated type, the parameter can be repeated in the URL
/// as `...?param=A&param=B`. In the case of a message type, each field of the
/// message is mapped to a separate parameter, such as
/// `...?foo.a=A&foo.b=B&foo.c=C`.
///
/// For HTTP methods that allow a request body, the `body` field
/// specifies the mapping. Consider a REST update method on the
/// message resource collection:
///
/// service Messaging {
/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// patch: "/v1/messages/{message_id}"
/// body: "message"
/// };
/// }
/// }
/// message UpdateMessageRequest {
/// string message_id = 1; // mapped to the URL
/// Message message = 2; // mapped to the body
/// }
///
/// The following HTTP JSON to RPC mapping is enabled, where the
/// representation of the JSON in the request body is determined by
/// protos JSON encoding:
///
/// HTTP | gRPC
/// -----|-----
/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
/// "123456" message { text: "Hi!" })`
///
/// The special name `*` can be used in the body mapping to define that
/// every field not bound by the path template should be mapped to the
/// request body. This enables the following alternative definition of
/// the update method:
///
/// service Messaging {
/// rpc UpdateMessage(Message) returns (Message) {
/// option (google.api.http) = {
/// patch: "/v1/messages/{message_id}"
/// body: "*"
/// };
/// }
/// }
/// message Message {
/// string message_id = 1;
/// string text = 2;
/// }
///
///
/// The following HTTP JSON to RPC mapping is enabled:
///
/// HTTP | gRPC
/// -----|-----
/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
/// "123456" text: "Hi!")`
///
/// Note that when using `*` in the body mapping, it is not possible to
/// have HTTP parameters, as all fields not bound by the path end in
/// the body. This makes this option more rarely used in practice when
/// defining REST APIs. The common usage of `*` is in custom methods
/// which don't use the URL at all for transferring data.
///
/// It is possible to define multiple HTTP methods for one RPC by using
/// the `additional_bindings` option. Example:
///
/// service Messaging {
/// rpc GetMessage(GetMessageRequest) returns (Message) {
/// option (google.api.http) = {
/// get: "/v1/messages/{message_id}"
/// additional_bindings {
/// get: "/v1/users/{user_id}/messages/{message_id}"
/// }
/// };
/// }
/// }
/// message GetMessageRequest {
/// string message_id = 1;
/// string user_id = 2;
/// }
///
/// This enables the following two alternative HTTP JSON to RPC mappings:
///
/// HTTP | gRPC
/// -----|-----
/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
/// "123456")`
///
/// ## Rules for HTTP mapping
///
/// 1. Leaf request fields (recursive expansion nested messages in the request
/// message) are classified into three categories:
/// - Fields referred by the path template. They are passed via the URL path.
/// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP
/// request body.
/// - All other fields are passed via the URL query parameters, and the
/// parameter name is the field path in the request message. A repeated
/// field can be represented as multiple query parameters under the same
/// name.
/// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields
/// are passed via URL path and HTTP request body.
/// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all
/// fields are passed via URL path and URL query parameters.
///
/// ### Path template syntax
///
/// Template = "/" Segments [ Verb ] ;
/// Segments = Segment { "/" Segment } ;
/// Segment = "*" | "**" | LITERAL | Variable ;
/// Variable = "{" FieldPath [ "=" Segments ] "}" ;
/// FieldPath = IDENT { "." IDENT } ;
/// Verb = ":" LITERAL ;
///
/// The syntax `*` matches a single URL path segment. The syntax `**` matches
/// zero or more URL path segments, which must be the last part of the URL path
/// except the `Verb`.
///
/// The syntax `Variable` matches part of the URL path as specified by its
/// template. A variable template must not contain other variables. If a variable
/// matches a single path segment, its template may be omitted, e.g. `{var}`
/// is equivalent to `{var=*}`.
///
/// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`
/// contains any reserved character, such characters should be percent-encoded
/// before the matching.
///
/// If a variable contains exactly one path segment, such as `"{var}"` or
/// `"{var=*}"`, when such a variable is expanded into a URL path on the client
/// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The
/// server side does the reverse decoding. Such variables show up in the
/// [Discovery
/// Document](https://developers.google.com/discovery/v1/reference/apis) as
/// `{var}`.
///
/// If a variable contains multiple path segments, such as `"{var=foo/*}"`
/// or `"{var=**}"`, when such a variable is expanded into a URL path on the
/// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.
/// The server side does the reverse decoding, except "%2F" and "%2f" are left
/// unchanged. Such variables show up in the
/// [Discovery
/// Document](https://developers.google.com/discovery/v1/reference/apis) as
/// `{+var}`.
///
/// ## Using gRPC API Service Configuration
///
/// gRPC API Service Configuration (service config) is a configuration language
/// for configuring a gRPC service to become a user-facing product. The
/// service config is simply the YAML representation of the `google.api.Service`
/// proto message.
///
/// As an alternative to annotating your proto file, you can configure gRPC
/// transcoding in your service config YAML files. You do this by specifying a
/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
/// effect as the proto annotation. This can be particularly useful if you
/// have a proto that is reused in multiple services. Note that any transcoding
/// specified in the service config will override any matching transcoding
/// configuration in the proto.
///
/// Example:
///
/// http:
/// rules:
/// # Selects a gRPC method and applies HttpRule to it.
/// - selector: example.v1.Messaging.GetMessage
/// get: /v1/messages/{message_id}/{sub.subfield}
///
/// ## Special notes
///
/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the
/// proto to JSON conversion must follow the [proto3
/// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
///
/// While the single segment variable follows the semantics of
/// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
/// Expansion, the multi segment variable **does not** follow RFC 6570 Section
/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
/// does not expand special characters like `?` and `#`, which would lead
/// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding
/// for multi segment variables.
///
/// The path variables **must not** refer to any repeated or mapped field,
/// because client libraries are not capable of handling such variable expansion.
///
/// The path variables **must not** capture the leading "/" character. The reason
/// is that the most common use case "{var}" does not capture the leading "/"
/// character. For consistency, all path variables must share the same behavior.
///
/// Repeated message fields must not be mapped to URL query parameters, because
/// no client library can support such complicated mapping.
///
/// If an API needs to use a JSON array for request or response body, it can map
/// the request or response body to a repeated field. However, some gRPC
/// Transcoding implementations may not support this feature.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct HttpRule {
/// Selects a method to which this rule applies.
///
/// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
#[prost(string, tag = "1")]
pub selector: std::string::String,
/// The name of the request field whose value is mapped to the HTTP request
/// body, or `*` for mapping all request fields not captured by the path
/// pattern to the HTTP body, or omitted for not having any HTTP request body.
///
/// NOTE: the referred field must be present at the top-level of the request
/// message type.
#[prost(string, tag = "7")]
pub body: std::string::String,
/// Optional. The name of the response field whose value is mapped to the HTTP
/// response body. When omitted, the entire response message will be used
/// as the HTTP response body.
///
/// NOTE: The referred field must be present at the top-level of the response
/// message type.
#[prost(string, tag = "12")]
pub response_body: std::string::String,
/// Additional HTTP bindings for the selector. Nested bindings must
/// not contain an `additional_bindings` field themselves (that is,
/// the nesting may only be one level deep).
#[prost(message, repeated, tag = "11")]
pub additional_bindings: ::std::vec::Vec<HttpRule>,
/// Determines the URL pattern is matched by this rules. This pattern can be
/// used with any of the {get|put|post|delete|patch} methods. A custom method
/// can be defined using the 'custom' field.
#[prost(oneof = "http_rule::Pattern", tags = "2, 3, 4, 5, 6, 8")]
pub pattern: ::std::option::Option<http_rule::Pattern>,
}
pub mod http_rule {
/// Determines the URL pattern is matched by this rules. This pattern can be
/// used with any of the {get|put|post|delete|patch} methods. A custom method
/// can be defined using the 'custom' field.
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Pattern {
/// Maps to HTTP GET. Used for listing and getting information about
/// resources.
#[prost(string, tag = "2")]
Get(std::string::String),
/// Maps to HTTP PUT. Used for replacing a resource.
#[prost(string, tag = "3")]
Put(std::string::String),
/// Maps to HTTP POST. Used for creating a resource or performing an action.
#[prost(string, tag = "4")]
Post(std::string::String),
/// Maps to HTTP DELETE. Used for deleting a resource.
#[prost(string, tag = "5")]
Delete(std::string::String),
/// Maps to HTTP PATCH. Used for updating a resource.
#[prost(string, tag = "6")]
Patch(std::string::String),
/// The custom pattern is used for specifying an HTTP method that is not
/// included in the `pattern` field, such as HEAD, or "*" to leave the
/// HTTP method unspecified for this rule. The wild-card rule is useful
/// for services that provide content to Web (HTML) clients.
#[prost(message, tag = "8")]
Custom(super::CustomHttpPattern),
}
}
/// A custom pattern is used for defining custom HTTP verb.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CustomHttpPattern {
/// The name of this custom HTTP verb.
#[prost(string, tag = "1")]
pub kind: std::string::String,
/// The path matched by this custom verb.
#[prost(string, tag = "2")]
pub path: std::string::String,
}
/// An indicator of the behavior of a given field (for example, that a field
/// is required in requests, or given as output but ignored as input).
/// This **does not** change the behavior in protocol buffers itself; it only
/// denotes the behavior and may affect how API tooling handles the field.
///
/// Note: This enum **may** receive new values in the future.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum FieldBehavior {
/// Conventional default for enums. Do not use this.
Unspecified = 0,
/// Specifically denotes a field as optional.
/// While all fields in protocol buffers are optional, this may be specified
/// for emphasis if appropriate.
Optional = 1,
/// Denotes a field as required.
/// This indicates that the field **must** be provided as part of the request,
/// and failure to do so will cause an error (usually `INVALID_ARGUMENT`).
Required = 2,
/// Denotes a field as output only.
/// This indicates that the field is provided in responses, but including the
/// field in a request does nothing (the server *must* ignore it and
/// *must not* throw an error as a result of the field's presence).
OutputOnly = 3,
/// Denotes a field as input only.
/// This indicates that the field is provided in requests, and the
/// corresponding field is not included in output.
InputOnly = 4,
/// Denotes a field as immutable.
/// This indicates that the field may be set once in a request to create a
/// resource, but may not be changed thereafter.
Immutable = 5,
}
/// A simple descriptor of a resource type.
///
/// ResourceDescriptor annotates a resource message (either by means of a
/// protobuf annotation or use in the service config), and associates the
/// resource's schema, the resource type, and the pattern of the resource name.
///
/// Example:
///
/// message Topic {
/// // Indicates this message defines a resource schema.
/// // Declares the resource type in the format of {service}/{kind}.
/// // For Kubernetes resources, the format is {api group}/{kind}.
/// option (google.api.resource) = {
/// type: "pubsub.googleapis.com/Topic"
/// name_descriptor: {
/// pattern: "projects/{project}/topics/{topic}"
/// parent_type: "cloudresourcemanager.googleapis.com/Project"
/// parent_name_extractor: "projects/{project}"
/// }
/// };
/// }
///
/// The ResourceDescriptor Yaml config will look like:
///
/// resources:
/// - type: "pubsub.googleapis.com/Topic"
/// name_descriptor:
/// - pattern: "projects/{project}/topics/{topic}"
/// parent_type: "cloudresourcemanager.googleapis.com/Project"
/// parent_name_extractor: "projects/{project}"
///
/// Sometimes, resources have multiple patterns, typically because they can
/// live under multiple parents.
///
/// Example:
///
/// message LogEntry {
/// option (google.api.resource) = {
/// type: "logging.googleapis.com/LogEntry"
/// name_descriptor: {
/// pattern: "projects/{project}/logs/{log}"
/// parent_type: "cloudresourcemanager.googleapis.com/Project"
/// parent_name_extractor: "projects/{project}"
/// }
/// name_descriptor: {
/// pattern: "folders/{folder}/logs/{log}"
/// parent_type: "cloudresourcemanager.googleapis.com/Folder"
/// parent_name_extractor: "folders/{folder}"
/// }
/// name_descriptor: {
/// pattern: "organizations/{organization}/logs/{log}"
/// parent_type: "cloudresourcemanager.googleapis.com/Organization"
/// parent_name_extractor: "organizations/{organization}"
/// }
/// name_descriptor: {
/// pattern: "billingAccounts/{billing_account}/logs/{log}"
/// parent_type: "billing.googleapis.com/BillingAccount"
/// parent_name_extractor: "billingAccounts/{billing_account}"
/// }
/// };
/// }
///
/// The ResourceDescriptor Yaml config will look like:
///
/// resources:
/// - type: 'logging.googleapis.com/LogEntry'
/// name_descriptor:
/// - pattern: "projects/{project}/logs/{log}"
/// parent_type: "cloudresourcemanager.googleapis.com/Project"
/// parent_name_extractor: "projects/{project}"
/// - pattern: "folders/{folder}/logs/{log}"
/// parent_type: "cloudresourcemanager.googleapis.com/Folder"
/// parent_name_extractor: "folders/{folder}"
/// - pattern: "organizations/{organization}/logs/{log}"
/// parent_type: "cloudresourcemanager.googleapis.com/Organization"
/// parent_name_extractor: "organizations/{organization}"
/// - pattern: "billingAccounts/{billing_account}/logs/{log}"
/// parent_type: "billing.googleapis.com/BillingAccount"
/// parent_name_extractor: "billingAccounts/{billing_account}"
///
/// For flexible resources, the resource name doesn't contain parent names, but
/// the resource itself has parents for policy evaluation.
///
/// Example:
///
/// message Shelf {
/// option (google.api.resource) = {
/// type: "library.googleapis.com/Shelf"
/// name_descriptor: {
/// pattern: "shelves/{shelf}"
/// parent_type: "cloudresourcemanager.googleapis.com/Project"
/// }
/// name_descriptor: {
/// pattern: "shelves/{shelf}"
/// parent_type: "cloudresourcemanager.googleapis.com/Folder"
/// }
/// };
/// }
///
/// The ResourceDescriptor Yaml config will look like:
///
/// resources:
/// - type: 'library.googleapis.com/Shelf'
/// name_descriptor:
/// - pattern: "shelves/{shelf}"
/// parent_type: "cloudresourcemanager.googleapis.com/Project"
/// - pattern: "shelves/{shelf}"
/// parent_type: "cloudresourcemanager.googleapis.com/Folder"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ResourceDescriptor {
/// The resource type. It must be in the format of
/// {service_name}/{resource_type_kind}. The `resource_type_kind` must be
/// singular and must not include version numbers.
///
/// Example: `storage.googleapis.com/Bucket`
///
/// The value of the resource_type_kind must follow the regular expression
/// /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and
/// should use PascalCase (UpperCamelCase). The maximum number of
/// characters allowed for the `resource_type_kind` is 100.
#[prost(string, tag = "1")]
pub r#type: std::string::String,
/// Optional. The relative resource name pattern associated with this resource
/// type. The DNS prefix of the full resource name shouldn't be specified here.
///
/// The path pattern must follow the syntax, which aligns with HTTP binding
/// syntax:
///
/// Template = Segment { "/" Segment } ;
/// Segment = LITERAL | Variable ;
/// Variable = "{" LITERAL "}" ;
///
/// Examples:
///
/// - "projects/{project}/topics/{topic}"
/// - "projects/{project}/knowledgeBases/{knowledge_base}"
///
/// The components in braces correspond to the IDs for each resource in the
/// hierarchy. It is expected that, if multiple patterns are provided,
/// the same component name (e.g. "project") refers to IDs of the same
/// type of resource.
#[prost(string, repeated, tag = "2")]
pub pattern: ::std::vec::Vec<std::string::String>,
/// Optional. The field on the resource that designates the resource name
/// field. If omitted, this is assumed to be "name".
#[prost(string, tag = "3")]
pub name_field: std::string::String,
/// Optional. The historical or future-looking state of the resource pattern.
///
/// Example:
///
/// // The InspectTemplate message originally only supported resource
/// // names with organization, and project was added later.
/// message InspectTemplate {
/// option (google.api.resource) = {
/// type: "dlp.googleapis.com/InspectTemplate"
/// pattern:
/// "organizations/{organization}/inspectTemplates/{inspect_template}"
/// pattern: "projects/{project}/inspectTemplates/{inspect_template}"
/// history: ORIGINALLY_SINGLE_PATTERN
/// };
/// }
#[prost(enumeration = "resource_descriptor::History", tag = "4")]
pub history: i32,
/// The plural name used in the resource name and permission names, such as
/// 'projects' for the resource name of 'projects/{project}' and the permission
/// name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same
/// concept of the `plural` field in k8s CRD spec
/// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
///
/// Note: The plural form is required even for singleton resources. See
/// https://aip.dev/156
#[prost(string, tag = "5")]
pub plural: std::string::String,
/// The same concept of the `singular` field in k8s CRD spec
/// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
/// Such as "project" for the `resourcemanager.googleapis.com/Project` type.
#[prost(string, tag = "6")]
pub singular: std::string::String,
}
pub mod resource_descriptor {
/// A description of the historical or future-looking state of the
/// resource pattern.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum History {
/// The "unset" value.
Unspecified = 0,
/// The resource originally had one pattern and launched as such, and
/// additional patterns were added later.
OriginallySinglePattern = 1,
/// The resource has one pattern, but the API owner expects to add more
/// later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents
/// that from being necessary once there are multiple patterns.)
FutureMultiPattern = 2,
}
}
/// Defines a proto annotation that describes a string field that refers to
/// an API resource.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ResourceReference {
/// The resource type that the annotated field references.
///
/// Example:
///
/// message Subscription {
/// string topic = 2 [(google.api.resource_reference) = {
/// type: "pubsub.googleapis.com/Topic"
/// }];
/// }
///
/// Occasionally, a field may reference an arbitrary resource. In this case,
/// APIs use the special value * in their resource reference.
///
/// Example:
///
/// message GetIamPolicyRequest {
/// string resource = 2 [(google.api.resource_reference) = {
/// type: "*"
/// }];
/// }
#[prost(string, tag = "1")]
pub r#type: std::string::String,
/// The resource type of a child collection that the annotated field
/// references. This is useful for annotating the `parent` field that
/// doesn't have a fixed resource type.
///
/// Example:
///
/// message ListLogEntriesRequest {
/// string parent = 1 [(google.api.resource_reference) = {
/// child_type: "logging.googleapis.com/LogEntry"
/// };
/// }
#[prost(string, tag = "2")]
pub child_type: std::string::String,
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,22 @@
/// The `Status` type defines a logical error model that is suitable for
/// different programming environments, including REST APIs and RPC APIs. It is
/// used by [gRPC](https://github.com/grpc). Each `Status` message contains
/// three pieces of data: error code, error message, and error details.
///
/// You can find out more about this error model and how to work with it in the
/// [API Design Guide](https://cloud.google.com/apis/design/errors).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Status {
/// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
#[prost(int32, tag = "1")]
pub code: i32,
/// A developer-facing error message, which should be in English. Any
/// user-facing error message should be localized and sent in the
/// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
#[prost(string, tag = "2")]
pub message: std::string::String,
/// A list of messages that carry the error details. There is a common set of
/// message types for APIs to use.
#[prost(message, repeated, tag = "3")]
pub details: ::std::vec::Vec<::prost_types::Any>,
}

View File

@ -0,0 +1,118 @@
/// A module for managing a Google API access token
use goauth::{
auth::{JwtClaims, Token},
credentials::Credentials,
};
use log::*;
use smpl_jwt::Jwt;
use std::{
sync::{
atomic::{AtomicBool, Ordering},
{Arc, RwLock},
},
time::Instant,
};
pub use goauth::scopes::Scope;
fn load_credentials() -> Result<Credentials, String> {
// Use standard GOOGLE_APPLICATION_CREDENTIALS environment variable
let credentials_file = std::env::var("GOOGLE_APPLICATION_CREDENTIALS")
.map_err(|_| "GOOGLE_APPLICATION_CREDENTIALS environment variable not found".to_string())?;
Credentials::from_file(&credentials_file).map_err(|err| {
format!(
"Failed to read GCP credentials from {}: {}",
credentials_file, err
)
})
}
#[derive(Clone)]
pub struct AccessToken {
credentials: Credentials,
scope: Scope,
refresh_active: Arc<AtomicBool>,
token: Arc<RwLock<(Token, Instant)>>,
}
impl AccessToken {
pub async fn new(scope: Scope) -> Result<Self, String> {
let credentials = load_credentials()?;
if let Err(err) = credentials.rsa_key() {
Err(format!("Invalid rsa key: {}", err))
} else {
let token = Arc::new(RwLock::new(Self::get_token(&credentials, &scope).await?));
let access_token = Self {
credentials,
scope,
token,
refresh_active: Arc::new(AtomicBool::new(false)),
};
Ok(access_token)
}
}
/// The project that this token grants access to
pub fn project(&self) -> String {
self.credentials.project()
}
async fn get_token(
credentials: &Credentials,
scope: &Scope,
) -> Result<(Token, Instant), String> {
info!("Requesting token for {:?} scope", scope);
let claims = JwtClaims::new(
credentials.iss(),
scope,
credentials.token_uri(),
None,
None,
);
let jwt = Jwt::new(claims, credentials.rsa_key().unwrap(), None);
let token = goauth::get_token(&jwt, credentials)
.await
.map_err(|err| format!("Failed to refresh access token: {}", err))?;
info!("Token expires in {} seconds", token.expires_in());
Ok((token, Instant::now()))
}
/// Call this function regularly to ensure the access token does not expire
pub async fn refresh(&self) {
// Check if it's time to try a token refresh
{
let token_r = self.token.read().unwrap();
if token_r.1.elapsed().as_secs() < token_r.0.expires_in() as u64 / 2 {
return;
}
if self
.refresh_active
.compare_and_swap(false, true, Ordering::Relaxed)
{
// Refresh already pending
return;
}
}
info!("Refreshing token");
let new_token = Self::get_token(&self.credentials, &self.scope).await;
{
let mut token_w = self.token.write().unwrap();
match new_token {
Ok(new_token) => *token_w = new_token,
Err(err) => warn!("{}", err),
}
self.refresh_active.store(false, Ordering::Relaxed);
}
}
/// Return an access token suitable for use in an HTTP authorization header
pub fn get(&self) -> String {
let token_r = self.token.read().unwrap();
format!("{} {}", token_r.0.token_type(), token_r.0.access_token())
}
}

View File

@ -0,0 +1,466 @@
// Primitives for reading/writing BigTable tables
use crate::access_token::{AccessToken, Scope};
use crate::compression::{compress_best, decompress};
use crate::root_ca_certificate;
use log::*;
use thiserror::Error;
use tonic::{metadata::MetadataValue, transport::ClientTlsConfig, Request};
mod google {
mod rpc {
include!(concat!(
env!("CARGO_MANIFEST_DIR"),
concat!("/proto/google.rpc.rs")
));
}
pub mod bigtable {
pub mod v2 {
include!(concat!(
env!("CARGO_MANIFEST_DIR"),
concat!("/proto/google.bigtable.v2.rs")
));
}
}
}
use google::bigtable::v2::*;
pub type RowKey = String;
pub type CellName = String;
pub type CellValue = Vec<u8>;
pub type RowData = Vec<(CellName, CellValue)>;
#[derive(Debug, Error)]
pub enum Error {
#[error("AccessToken error: {0}")]
AccessTokenError(String),
#[error("Certificate error: {0}")]
CertificateError(String),
#[error("I/O Error: {0}")]
IoError(std::io::Error),
#[error("Transport error: {0}")]
TransportError(tonic::transport::Error),
#[error("Invalid URI {0}: {1}")]
InvalidUri(String, String),
#[error("Row not found")]
RowNotFound,
#[error("Row write failed")]
RowWriteFailed,
#[error("Object not found: {0}")]
ObjectNotFound(String),
#[error("Object is corrupt: {0}")]
ObjectCorrupt(String),
#[error("RPC error: {0}")]
RpcError(tonic::Status),
}
impl std::convert::From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Self {
Self::IoError(err)
}
}
impl std::convert::From<tonic::transport::Error> for Error {
fn from(err: tonic::transport::Error) -> Self {
Self::TransportError(err)
}
}
impl std::convert::From<tonic::Status> for Error {
fn from(err: tonic::Status) -> Self {
Self::RpcError(err)
}
}
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Clone)]
pub struct BigTableConnection {
access_token: Option<AccessToken>,
channel: tonic::transport::Channel,
table_prefix: String,
}
impl BigTableConnection {
/// Establish a connection to the BigTable instance named `instance_name`. If read-only access
/// is required, the `read_only` flag should be used to reduce the requested OAuth2 scope.
///
/// The GOOGLE_APPLICATION_CREDENTIALS environment variable will be used to determine the
/// program name that contains the BigTable instance in addition to access credentials.
///
/// The BIGTABLE_EMULATOR_HOST environment variable is also respected.
///
pub async fn new(instance_name: &str, read_only: bool) -> Result<Self> {
match std::env::var("BIGTABLE_EMULATOR_HOST") {
Ok(endpoint) => {
info!("Connecting to bigtable emulator at {}", endpoint);
Ok(Self {
access_token: None,
channel: tonic::transport::Channel::from_shared(format!("http://{}", endpoint))
.map_err(|err| Error::InvalidUri(endpoint, err.to_string()))?
.connect_lazy()?,
table_prefix: format!("projects/emulator/instances/{}/tables/", instance_name),
})
}
Err(_) => {
let access_token = AccessToken::new(if read_only {
Scope::BigTableDataReadOnly
} else {
Scope::BigTableData
})
.await
.map_err(Error::AccessTokenError)?;
let table_prefix = format!(
"projects/{}/instances/{}/tables/",
access_token.project(),
instance_name
);
Ok(Self {
access_token: Some(access_token),
channel: tonic::transport::Channel::from_static(
"https://bigtable.googleapis.com",
)
.tls_config(
ClientTlsConfig::new()
.ca_certificate(
root_ca_certificate::load().map_err(Error::CertificateError)?,
)
.domain_name("bigtable.googleapis.com"),
)?
.connect_lazy()?,
table_prefix,
})
}
}
}
/// Create a new BigTable client.
///
/// Clients require `&mut self`, due to `Tonic::transport::Channel` limitations, however
/// creating new clients is cheap and thus can be used as a work around for ease of use.
pub fn client(&self) -> BigTable {
let client = if let Some(access_token) = &self.access_token {
let access_token = access_token.clone();
bigtable_client::BigtableClient::with_interceptor(
self.channel.clone(),
move |mut req: Request<()>| {
match MetadataValue::from_str(&access_token.get()) {
Ok(authorization_header) => {
req.metadata_mut()
.insert("authorization", authorization_header);
}
Err(err) => {
warn!("Failed to set authorization header: {}", err);
}
}
Ok(req)
},
)
} else {
bigtable_client::BigtableClient::new(self.channel.clone())
};
BigTable {
access_token: self.access_token.clone(),
client,
table_prefix: self.table_prefix.clone(),
}
}
pub async fn put_bincode_cells_with_retry<T>(
&self,
table: &str,
cells: &[(RowKey, T)],
) -> Result<usize>
where
T: serde::ser::Serialize,
{
use backoff::{future::FutureOperation as _, ExponentialBackoff};
(|| async {
let mut client = self.client();
Ok(client.put_bincode_cells(table, cells).await?)
})
.retry(ExponentialBackoff::default())
.await
}
}
pub struct BigTable {
access_token: Option<AccessToken>,
client: bigtable_client::BigtableClient<tonic::transport::Channel>,
table_prefix: String,
}
impl BigTable {
async fn decode_read_rows_response(
mut rrr: tonic::codec::Streaming<ReadRowsResponse>,
) -> Result<Vec<(RowKey, RowData)>> {
let mut rows: Vec<(RowKey, RowData)> = vec![];
let mut row_key = None;
let mut row_data = vec![];
let mut cell_name = None;
let mut cell_timestamp = 0;
let mut cell_value = vec![];
let mut cell_version_ok = true;
while let Some(res) = rrr.message().await? {
for (i, mut chunk) in res.chunks.into_iter().enumerate() {
// The comments for `read_rows_response::CellChunk` provide essential details for
// understanding how the below decoding works...
trace!("chunk {}: {:?}", i, chunk);
// Starting a new row?
if !chunk.row_key.is_empty() {
row_key = String::from_utf8(chunk.row_key).ok(); // Require UTF-8 for row keys
}
// Starting a new cell?
if let Some(qualifier) = chunk.qualifier {
if let Some(cell_name) = cell_name {
row_data.push((cell_name, cell_value));
cell_value = vec![];
}
cell_name = String::from_utf8(qualifier).ok(); // Require UTF-8 for cell names
cell_timestamp = chunk.timestamp_micros;
cell_version_ok = true;
} else {
// Continuing the existing cell. Check if this is the start of another version of the cell
if chunk.timestamp_micros != 0 {
if chunk.timestamp_micros < cell_timestamp {
cell_version_ok = false; // ignore older versions of the cell
} else {
// newer version of the cell, remove the older cell
cell_version_ok = true;
cell_value = vec![];
cell_timestamp = chunk.timestamp_micros;
}
}
}
if cell_version_ok {
cell_value.append(&mut chunk.value);
}
// End of a row?
if chunk.row_status.is_some() {
if let Some(read_rows_response::cell_chunk::RowStatus::CommitRow(_)) =
chunk.row_status
{
if let Some(cell_name) = cell_name {
row_data.push((cell_name, cell_value));
}
if let Some(row_key) = row_key {
rows.push((row_key, row_data))
}
}
row_key = None;
row_data = vec![];
cell_value = vec![];
cell_name = None;
}
}
}
Ok(rows)
}
async fn refresh_access_token(&self) {
if let Some(ref access_token) = self.access_token {
access_token.refresh().await;
}
}
/// Get `table` row keys in lexical order.
///
/// If `start_at` is provided, the row key listing will start with key.
/// Otherwise the listing will start from the start of the table.
pub async fn get_row_keys(
&mut self,
table_name: &str,
start_at: Option<RowKey>,
rows_limit: i64,
) -> Result<Vec<RowKey>> {
self.refresh_access_token().await;
let response = self
.client
.read_rows(ReadRowsRequest {
table_name: format!("{}{}", self.table_prefix, table_name),
rows_limit,
rows: Some(RowSet {
row_keys: vec![],
row_ranges: if let Some(row_key) = start_at {
vec![RowRange {
start_key: Some(row_range::StartKey::StartKeyClosed(
row_key.into_bytes(),
)),
end_key: None,
}]
} else {
vec![]
},
}),
filter: Some(RowFilter {
filter: Some(row_filter::Filter::Chain(row_filter::Chain {
filters: vec![
RowFilter {
// Return minimal number of cells
filter: Some(row_filter::Filter::CellsPerRowLimitFilter(1)),
},
RowFilter {
// Only return the latest version of each cell
filter: Some(row_filter::Filter::CellsPerColumnLimitFilter(1)),
},
RowFilter {
// Strip the cell values
filter: Some(row_filter::Filter::StripValueTransformer(true)),
},
],
})),
}),
..ReadRowsRequest::default()
})
.await?
.into_inner();
let rows = Self::decode_read_rows_response(response).await?;
Ok(rows.into_iter().map(|r| r.0).collect())
}
/// Get latest data from `limit` rows of `table`, starting inclusively at the `row_key` row.
///
/// All column families are accepted, and only the latest version of each column cell will be
/// returned.
pub async fn get_row_data(&mut self, table_name: &str, row_key: RowKey) -> Result<RowData> {
self.refresh_access_token().await;
let response = self
.client
.read_rows(ReadRowsRequest {
table_name: format!("{}{}", self.table_prefix, table_name),
rows_limit: 1,
rows: Some(RowSet {
row_keys: vec![row_key.into_bytes()],
row_ranges: vec![],
}),
filter: Some(RowFilter {
// Only return the latest version of each cell
filter: Some(row_filter::Filter::CellsPerColumnLimitFilter(1)),
}),
..ReadRowsRequest::default()
})
.await?
.into_inner();
let rows = Self::decode_read_rows_response(response).await?;
rows.into_iter()
.next()
.map(|r| r.1)
.ok_or_else(|| Error::RowNotFound)
}
/// Store data for one or more `table` rows in the `family_name` Column family
async fn put_row_data(
&mut self,
table_name: &str,
family_name: &str,
row_data: &[(&RowKey, RowData)],
) -> Result<()> {
self.refresh_access_token().await;
let mut entries = vec![];
for (row_key, row_data) in row_data {
let mutations = row_data
.iter()
.map(|(column_key, column_value)| Mutation {
mutation: Some(mutation::Mutation::SetCell(mutation::SetCell {
family_name: family_name.to_string(),
column_qualifier: column_key.clone().into_bytes(),
timestamp_micros: -1, // server assigned
value: column_value.to_vec(),
})),
})
.collect();
entries.push(mutate_rows_request::Entry {
row_key: (*row_key).clone().into_bytes(),
mutations,
});
}
let mut response = self
.client
.mutate_rows(MutateRowsRequest {
table_name: format!("{}{}", self.table_prefix, table_name),
entries,
..MutateRowsRequest::default()
})
.await?
.into_inner();
while let Some(res) = response.message().await? {
for entry in res.entries {
if let Some(status) = entry.status {
if status.code != 0 {
eprintln!("put_row_data error {}: {}", status.code, status.message);
warn!("put_row_data error {}: {}", status.code, status.message);
return Err(Error::RowWriteFailed);
}
}
}
}
Ok(())
}
pub async fn get_bincode_cell<T>(&mut self, table: &str, key: RowKey) -> Result<T>
where
T: serde::de::DeserializeOwned,
{
let row_data = self.get_row_data(table, key.clone()).await?;
let value = row_data
.into_iter()
.find(|(name, _)| name == "bin")
.ok_or_else(|| Error::ObjectNotFound(format!("{}/{}", table, key)))?
.1;
let data = decompress(&value)?;
bincode::deserialize(&data).map_err(|err| {
warn!("Failed to deserialize {}/{}: {}", table, key, err);
Error::ObjectCorrupt(format!("{}/{}", table, key))
})
}
pub async fn put_bincode_cells<T>(
&mut self,
table: &str,
cells: &[(RowKey, T)],
) -> Result<usize>
where
T: serde::ser::Serialize,
{
let mut bytes_written = 0;
let mut new_row_data = vec![];
for (row_key, data) in cells {
let data = compress_best(&bincode::serialize(&data).unwrap())?;
bytes_written += data.len();
new_row_data.push((row_key, vec![("bin".to_string(), data)]));
}
self.put_row_data(table, "x", &new_row_data).await?;
Ok(bytes_written)
}
}

View File

@ -0,0 +1,105 @@
use enum_iterator::IntoEnumIterator;
use std::io::{self, BufReader, Read, Write};
#[derive(Debug, Serialize, Deserialize, IntoEnumIterator)]
pub enum CompressionMethod {
NoCompression,
Bzip2,
Gzip,
Zstd,
}
fn decompress_reader<'a, R: Read + 'a>(
method: CompressionMethod,
stream: R,
) -> Result<Box<dyn Read + 'a>, io::Error> {
let buf_reader = BufReader::new(stream);
let decompress_reader: Box<dyn Read> = match method {
CompressionMethod::Bzip2 => Box::new(bzip2::bufread::BzDecoder::new(buf_reader)),
CompressionMethod::Gzip => Box::new(flate2::read::GzDecoder::new(buf_reader)),
CompressionMethod::Zstd => Box::new(zstd::stream::read::Decoder::new(buf_reader)?),
CompressionMethod::NoCompression => Box::new(buf_reader),
};
Ok(decompress_reader)
}
pub fn decompress(data: &[u8]) -> Result<Vec<u8>, io::Error> {
let method_size = bincode::serialized_size(&CompressionMethod::NoCompression).unwrap();
if (data.len() as u64) < method_size {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("data len too small: {}", data.len()),
));
}
let method = bincode::deserialize(&data[..method_size as usize]).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("method deserialize failed: {}", err),
)
})?;
let mut reader = decompress_reader(method, &data[method_size as usize..])?;
let mut uncompressed_data = vec![];
reader.read_to_end(&mut uncompressed_data)?;
Ok(uncompressed_data)
}
pub fn compress(method: CompressionMethod, data: &[u8]) -> Result<Vec<u8>, io::Error> {
let mut compressed_data = bincode::serialize(&method).unwrap();
compressed_data.extend(
match method {
CompressionMethod::Bzip2 => {
let mut e = bzip2::write::BzEncoder::new(Vec::new(), bzip2::Compression::Best);
e.write_all(data)?;
e.finish()?
}
CompressionMethod::Gzip => {
let mut e =
flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default());
e.write_all(data)?;
e.finish()?
}
CompressionMethod::Zstd => {
let mut e = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
e.write_all(data)?;
e.finish()?
}
CompressionMethod::NoCompression => data.to_vec(),
}
.into_iter(),
);
Ok(compressed_data)
}
pub fn compress_best(data: &[u8]) -> Result<Vec<u8>, io::Error> {
let mut candidates = vec![];
for method in CompressionMethod::into_enum_iter() {
candidates.push(compress(method, data)?);
}
Ok(candidates
.into_iter()
.min_by(|a, b| a.len().cmp(&b.len()))
.unwrap())
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_compress_uncompress() {
let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
assert_eq!(
decompress(&compress_best(&data).expect("compress_best")).expect("decompress"),
data
);
}
#[test]
fn test_compress() {
let data = vec![0; 256];
assert!(compress_best(&data).expect("compress_best").len() < data.len());
}
}

537
storage-bigtable/src/lib.rs Normal file
View File

@ -0,0 +1,537 @@
use log::*;
use serde::{Deserialize, Serialize};
use solana_sdk::{
clock::{Slot, UnixTimestamp},
pubkey::Pubkey,
signature::Signature,
sysvar::is_sysvar_id,
transaction::{Transaction, TransactionError},
};
use solana_transaction_status::{
ConfirmedBlock, ConfirmedTransaction, ConfirmedTransactionStatusWithSignature,
EncodedTransaction, Rewards, TransactionStatus, TransactionWithStatusMeta,
UiTransactionEncoding, UiTransactionStatusMeta,
};
use std::{
collections::HashMap,
convert::{TryFrom, TryInto},
};
use thiserror::Error;
#[macro_use]
extern crate serde_derive;
mod access_token;
mod bigtable;
mod compression;
mod root_ca_certificate;
#[derive(Debug, Error)]
pub enum Error {
#[error("BigTable: {0}")]
BigTableError(bigtable::Error),
#[error("I/O Error: {0}")]
IoError(std::io::Error),
#[error("Transaction encoded is not supported")]
UnsupportedTransactionEncoding,
#[error("Block not found: {0}")]
BlockNotFound(Slot),
#[error("Signature not found")]
SignatureNotFound,
}
impl std::convert::From<bigtable::Error> for Error {
fn from(err: bigtable::Error) -> Self {
Self::BigTableError(err)
}
}
impl std::convert::From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Self {
Self::IoError(err)
}
}
pub type Result<T> = std::result::Result<T, Error>;
// Convert a slot to its bucket representation whereby lower slots are always lexically ordered
// before higher slots
fn slot_to_key(slot: Slot) -> String {
format!("{:016x}", slot)
}
// Reverse of `slot_to_key`
fn key_to_slot(key: &str) -> Option<Slot> {
match Slot::from_str_radix(key, 16) {
Ok(slot) => Some(slot),
Err(err) => {
// bucket data is probably corrupt
warn!("Failed to parse object key as a slot: {}: {}", key, err);
None
}
}
}
// A serialized `StoredConfirmedBlock` is stored in the `block` table
//
// StoredConfirmedBlock holds the same contents as ConfirmedBlock, but is slightly compressed and avoids
// some serde JSON directives that cause issues with bincode
//
#[derive(Serialize, Deserialize)]
struct StoredConfirmedBlock {
previous_blockhash: String,
blockhash: String,
parent_slot: Slot,
transactions: Vec<StoredConfirmedBlockTransaction>,
rewards: Rewards,
block_time: Option<UnixTimestamp>,
}
impl StoredConfirmedBlock {
fn into_confirmed_block(self, encoding: UiTransactionEncoding) -> ConfirmedBlock {
let StoredConfirmedBlock {
previous_blockhash,
blockhash,
parent_slot,
transactions,
rewards,
block_time,
} = self;
ConfirmedBlock {
previous_blockhash,
blockhash,
parent_slot,
transactions: transactions
.into_iter()
.map(|transaction| transaction.into_transaction_with_status_meta(encoding))
.collect(),
rewards,
block_time,
}
}
}
impl TryFrom<ConfirmedBlock> for StoredConfirmedBlock {
type Error = Error;
fn try_from(confirmed_block: ConfirmedBlock) -> Result<Self> {
let ConfirmedBlock {
previous_blockhash,
blockhash,
parent_slot,
transactions,
rewards,
block_time,
} = confirmed_block;
let mut encoded_transactions = vec![];
for transaction in transactions.into_iter() {
encoded_transactions.push(transaction.try_into()?);
}
Ok(Self {
previous_blockhash,
blockhash,
parent_slot,
transactions: encoded_transactions,
rewards,
block_time,
})
}
}
#[derive(Serialize, Deserialize)]
struct StoredConfirmedBlockTransaction {
transaction: Transaction,
meta: Option<StoredConfirmedBlockTransactionStatusMeta>,
}
impl StoredConfirmedBlockTransaction {
fn into_transaction_with_status_meta(
self,
encoding: UiTransactionEncoding,
) -> TransactionWithStatusMeta {
let StoredConfirmedBlockTransaction { transaction, meta } = self;
TransactionWithStatusMeta {
transaction: EncodedTransaction::encode(transaction, encoding),
meta: meta.map(|meta| meta.into()),
}
}
}
impl TryFrom<TransactionWithStatusMeta> for StoredConfirmedBlockTransaction {
type Error = Error;
fn try_from(value: TransactionWithStatusMeta) -> Result<Self> {
let TransactionWithStatusMeta { transaction, meta } = value;
Ok(Self {
transaction: transaction
.decode()
.ok_or(Error::UnsupportedTransactionEncoding)?,
meta: meta.map(|meta| meta.into()),
})
}
}
#[derive(Serialize, Deserialize)]
struct StoredConfirmedBlockTransactionStatusMeta {
err: Option<TransactionError>,
fee: u64,
pre_balances: Vec<u64>,
post_balances: Vec<u64>,
}
impl From<StoredConfirmedBlockTransactionStatusMeta> for UiTransactionStatusMeta {
fn from(value: StoredConfirmedBlockTransactionStatusMeta) -> Self {
let StoredConfirmedBlockTransactionStatusMeta {
err,
fee,
pre_balances,
post_balances,
} = value;
let status = match &err {
None => Ok(()),
Some(err) => Err(err.clone()),
};
Self {
err,
status,
fee,
pre_balances,
post_balances,
}
}
}
impl From<UiTransactionStatusMeta> for StoredConfirmedBlockTransactionStatusMeta {
fn from(value: UiTransactionStatusMeta) -> Self {
let UiTransactionStatusMeta {
err,
fee,
pre_balances,
post_balances,
..
} = value;
Self {
err,
fee,
pre_balances,
post_balances,
}
}
}
// A serialized `TransactionInfo` is stored in the `tx` table
#[derive(Serialize, Deserialize)]
struct TransactionInfo {
slot: Slot, // The slot that contains the block with this transaction in it
index: u32, // Where the transaction is located in the block
err: Option<TransactionError>, // None if the transaction executed successfully
memo: Option<String>, // Transaction memo
}
impl From<TransactionInfo> for TransactionStatus {
fn from(transaction_info: TransactionInfo) -> Self {
let TransactionInfo { slot, err, .. } = transaction_info;
let status = match &err {
None => Ok(()),
Some(err) => Err(err.clone()),
};
Self {
slot,
confirmations: None,
status,
err,
}
}
}
// A serialized `Vec<TransactionByAddrInfo>` is stored in the `tx-by-addr` table. The row keys are
// the one's compliment of the slot so that rows may be listed in reverse order
#[derive(Serialize, Deserialize)]
struct TransactionByAddrInfo {
signature: Signature, // The transaction signature
err: Option<TransactionError>, // None if the transaction executed successfully
index: u32, // Where the transaction is located in the block
memo: Option<String>, // Transaction memo
}
#[derive(Clone)]
pub struct LedgerStorage {
connection: bigtable::BigTableConnection,
}
impl LedgerStorage {
pub async fn new(read_only: bool) -> Result<Self> {
let connection = bigtable::BigTableConnection::new("solana-ledger", read_only).await?;
Ok(Self { connection })
}
/// Return the available slot that contains a block
pub async fn get_first_available_block(&self) -> Result<Option<Slot>> {
let mut bigtable = self.connection.client();
let blocks = bigtable.get_row_keys("blocks", None, 1).await?;
if blocks.is_empty() {
return Ok(None);
}
Ok(key_to_slot(&blocks[0]))
}
/// Fetch the next slots after the provided slot that contains a block
///
/// start_slot: slot to start the search from (inclusive)
/// limit: stop after this many slots have been found.
pub async fn get_confirmed_blocks(&self, start_slot: Slot, limit: usize) -> Result<Vec<Slot>> {
let mut bigtable = self.connection.client();
let blocks = bigtable
.get_row_keys("blocks", Some(slot_to_key(start_slot)), limit as i64)
.await?;
Ok(blocks.into_iter().filter_map(|s| key_to_slot(&s)).collect())
}
/// Fetch the confirmed block from the desired slot
pub async fn get_confirmed_block(
&self,
slot: Slot,
encoding: UiTransactionEncoding,
) -> Result<ConfirmedBlock> {
let mut bigtable = self.connection.client();
let block = bigtable
.get_bincode_cell::<StoredConfirmedBlock>("blocks", slot_to_key(slot))
.await?;
Ok(block.into_confirmed_block(encoding))
}
pub async fn get_signature_status(&self, signature: &Signature) -> Result<TransactionStatus> {
let mut bigtable = self.connection.client();
let transaction_info = bigtable
.get_bincode_cell::<TransactionInfo>("tx", signature.to_string())
.await?;
Ok(transaction_info.into())
}
/// Fetch a confirmed transaction
pub async fn get_confirmed_transaction(
&self,
signature: &Signature,
encoding: UiTransactionEncoding,
) -> Result<Option<ConfirmedTransaction>> {
let mut bigtable = self.connection.client();
// Figure out which block the transaction is located in
let TransactionInfo { slot, index, .. } = bigtable
.get_bincode_cell("tx", signature.to_string())
.await?;
// Load the block and return the transaction
let block = bigtable
.get_bincode_cell::<StoredConfirmedBlock>("blocks", slot_to_key(slot))
.await?;
match block.transactions.into_iter().nth(index as usize) {
None => {
warn!("Transaction info for {} is corrupt", signature);
Ok(None)
}
Some(bucket_block_transaction) => {
if bucket_block_transaction.transaction.signatures[0] != *signature {
warn!(
"Transaction info or confirmed block for {} is corrupt",
signature
);
Ok(None)
} else {
Ok(Some(ConfirmedTransaction {
slot,
transaction: bucket_block_transaction
.into_transaction_with_status_meta(encoding),
}))
}
}
}
}
/// Get confirmed signatures for the provided address, in descending ledger order
///
/// address: address to search for
/// before_signature: start with the first signature older than this one
/// limit: stop after this many signatures.
pub async fn get_confirmed_signatures_for_address(
&self,
address: &Pubkey,
before_signature: Option<&Signature>,
limit: usize,
) -> Result<Vec<ConfirmedTransactionStatusWithSignature>> {
let mut bigtable = self.connection.client();
let address_prefix = format!("{}/", address);
// Figure out where to start listing from based on `before_signature`
let (first_slot, mut first_transaction_index) = match before_signature {
None => (Slot::MAX, 0),
Some(before_signature) => {
let TransactionInfo { slot, index, .. } = bigtable
.get_bincode_cell("tx", before_signature.to_string())
.await?;
(slot, index + 1)
}
};
let mut infos = vec![];
// Return the next `limit` tx-by-addr keys
let tx_by_addr_info_keys = bigtable
.get_row_keys(
"tx-by-addr",
Some(format!("{}{}", address_prefix, slot_to_key(!first_slot))),
limit as i64,
)
.await?;
// Read each tx-by-addr object until `limit` signatures have been found
'outer: for key in tx_by_addr_info_keys {
trace!("key is {}: slot is {}", key, &key[address_prefix.len()..]);
if !key.starts_with(&address_prefix) {
break 'outer;
}
let slot = !key_to_slot(&key[address_prefix.len()..]).ok_or_else(|| {
bigtable::Error::ObjectCorrupt(format!(
"Failed to convert key to slot: tx-by-addr/{}",
key
))
})?;
let tx_by_addr_infos = bigtable
.get_bincode_cell::<Vec<TransactionByAddrInfo>>("tx-by-addr", key)
.await?;
for tx_by_addr_info in tx_by_addr_infos
.into_iter()
.skip(first_transaction_index as usize)
{
infos.push(ConfirmedTransactionStatusWithSignature {
signature: tx_by_addr_info.signature,
slot,
err: tx_by_addr_info.err,
memo: tx_by_addr_info.memo,
});
if infos.len() >= limit {
break 'outer;
}
}
first_transaction_index = 0;
}
Ok(infos)
}
// Upload a new confirmed block and associated meta data.
pub async fn upload_confirmed_block(
&self,
slot: Slot,
confirmed_block: ConfirmedBlock,
) -> Result<()> {
let mut bytes_written = 0;
let mut by_addr: HashMap<Pubkey, Vec<TransactionByAddrInfo>> = HashMap::new();
let mut tx_cells = vec![];
for (index, transaction_with_meta) in confirmed_block.transactions.iter().enumerate() {
let err = transaction_with_meta
.meta
.as_ref()
.and_then(|meta| meta.err.clone());
let index = index as u32;
let transaction = transaction_with_meta
.transaction
.decode()
.expect("transaction decode failed");
let signature = transaction.signatures[0];
for address in transaction.message.account_keys {
if !is_sysvar_id(&address) {
by_addr
.entry(address)
.or_default()
.push(TransactionByAddrInfo {
signature,
err: err.clone(),
index,
memo: None, // TODO
});
}
}
tx_cells.push((
signature.to_string(),
TransactionInfo {
slot,
index,
err,
memo: None, // TODO
},
));
}
let tx_by_addr_cells: Vec<_> = by_addr
.into_iter()
.map(|(address, transaction_info_by_addr)| {
(
format!("{}/{}", address, slot_to_key(!slot)),
transaction_info_by_addr,
)
})
.collect();
if !tx_cells.is_empty() {
bytes_written += self
.connection
.put_bincode_cells_with_retry::<TransactionInfo>("tx", &tx_cells)
.await?;
}
if !tx_by_addr_cells.is_empty() {
bytes_written += self
.connection
.put_bincode_cells_with_retry::<Vec<TransactionByAddrInfo>>(
"tx-by-addr",
&tx_by_addr_cells,
)
.await?;
}
let num_transactions = confirmed_block.transactions.len();
// Store the block itself last, after all other metadata about the block has been
// successfully stored. This avoids partial uploaded blocks from becoming visible to
// `get_confirmed_block()` and `get_confirmed_blocks()`
let blocks_cells = [(slot_to_key(slot), confirmed_block.try_into()?)];
bytes_written += self
.connection
.put_bincode_cells_with_retry::<StoredConfirmedBlock>("blocks", &blocks_cells)
.await?;
info!(
"uploaded block for slot {}: {} transactions, {} bytes",
slot, num_transactions, bytes_written
);
Ok(())
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_slot_to_key() {
assert_eq!(slot_to_key(0), "0000000000000000");
assert_eq!(slot_to_key(!0), "ffffffffffffffff");
}
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,20 @@
use std::{fs::File, io::Read};
use tonic::transport::Certificate;
pub fn load() -> Result<Certificate, String> {
// Respect the standard GRPC_DEFAULT_SSL_ROOTS_FILE_PATH environment variable if present,
// otherwise use the built-in root certificate
let pem = match std::env::var("GRPC_DEFAULT_SSL_ROOTS_FILE_PATH").ok() {
Some(cert_file) => File::open(&cert_file)
.and_then(|mut file| {
let mut pem = Vec::new();
file.read_to_end(&mut pem).map(|_| pem)
})
.map_err(|err| format!("Failed to read {}: {}", cert_file, err))?,
None => {
// PEM file from Google Trust Services (https://pki.goog/roots.pem)
include_bytes!("pki-goog-roots.pem").to_vec()
}
};
Ok(Certificate::from_pem(&pem))
}

View File

@ -17,6 +17,7 @@ use solana_sdk::{
instruction::CompiledInstruction,
message::MessageHeader,
pubkey::Pubkey,
signature::Signature,
transaction::{Result, Transaction, TransactionError},
};
@ -125,7 +126,7 @@ impl From<TransactionStatusMeta> for UiTransactionStatusMeta {
pub struct TransactionStatus {
pub slot: Slot,
pub confirmations: Option<usize>, // None = rooted
pub status: Result<()>,
pub status: Result<()>, // legacy field
pub err: Option<TransactionError>,
}
@ -136,6 +137,15 @@ impl TransactionStatus {
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ConfirmedTransactionStatusWithSignature {
pub signature: Signature,
pub slot: Slot,
pub err: Option<TransactionError>,
pub memo: Option<String>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Reward {
pub pubkey: String,
@ -204,7 +214,7 @@ pub struct TransactionWithStatusMeta {
pub meta: Option<UiTransactionStatusMeta>,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiTransactionEncoding {
Binary,

View File

@ -627,7 +627,15 @@ pub fn main() {
.takes_value(false)
.help("Enable historical transaction info over JSON RPC, \
including the 'getConfirmedBlock' API. \
This will cause an increase in disk usage and IOPS"),
This will cause an increase in disk usage and IOPS"),
)
.arg(
Arg::with_name("enable_rpc_bigtable_ledger_storage")
.long("enable-rpc-bigtable-ledger-storage")
.requires("enable_rpc_transaction_history")
.takes_value(false)
.help("Fetch historical transaction info from a BigTable instance \
as a fallback to local ledger data"),
)
.arg(
Arg::with_name("health_check_slot_distance")
@ -938,6 +946,8 @@ pub fn main() {
enable_validator_exit: matches.is_present("enable_rpc_exit"),
enable_set_log_filter: matches.is_present("enable_rpc_set_log_filter"),
enable_rpc_transaction_history: matches.is_present("enable_rpc_transaction_history"),
enable_bigtable_ledger_storage: matches
.is_present("enable_rpc_bigtable_ledger_storage"),
identity_pubkey: identity_keypair.pubkey(),
faucet_addr: matches.value_of("rpc_faucet_addr").map(|address| {
solana_net_utils::parse_host_port(address).expect("failed to parse faucet address")