mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-22 20:48:01 +00:00
light-client: Add experimental light-client support (#965)
* rpc/types: Decode `SubstrateTxStatus` for substrate and smoldot Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * lightclient: Add light client Error Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * lightclient: Add background task to manage RPC responses Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * lightclient: Implement the light client RPC in subxt Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * subxt: Expose light client under experimental feature-flag Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * artifacts: Add development chain spec for local nodes Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Update cargo lock Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * examples: Add light client example Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Update sp-* crates and smoldot to use git with branch / rev Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Apply cargo fmt Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Fix clippy Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Import hashmap entry Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * lightclient: Fetch spec only if jsonrpsee feature is enabled Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Update subxt/src/rpc/lightclient/background.rs Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com> * Fix typo Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * artifacts: Update dev chain spec Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * types: Handle storage replies from chainHead_storage Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * artifacts: Add polkadot spec Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * lightclient: Handle RPC error responses Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * examples: Tx basic with light client for 
local nodes Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * example: Light client coprehensive example for live chains Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * examples: Remove prior light client example Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * feature: Rename experimental to unstable Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * book: Add light client section Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * testing: Fix clippy Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * lightclient: Ignore validated events Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust tests for light-clients and normal clients Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * testing: Keep lightclient variant Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Remove support for chainHead_storage for light client Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Update light client to point to crates.io Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Update sp-crates from crates.io Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Replace Atomic with u64 Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Add LightClientBuilder Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust chainspec with provided bootnodes Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Add potential_relay_chains to light client builder Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Move the light-client to the background task Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust tracing logs Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Update book and example Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Apply cargo fmt Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Remove dev_spec.json artifact 
Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Examples fix duplicate Cargo.toml Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Use tracing_subscriber crate Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Fix clippy for different features Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Add comment about bootNodes Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Add comment about tracing-sub dependency Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Run integration-tests with light-client Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Feature guard some incompatible tests Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * ci: Enable light-client tests under feature flag Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * ci: Fix git step name Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust flags for testing Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust warnings Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Rename feature flag jsonrpsee-ws to jsonrpsee Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Fix cargo check Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * ci: Run tests on just 2 threads Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Move light-client to subxt/src/client Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust LightClientBuilder Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Use ws_url to construct light client for testing Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Refactor background Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Address feedback Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Remove polkadot.spec and trim sub_id Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Wait for substrate to produce block 
before connecting light client Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust builder and tests Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Apply fmt Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * ci: Use release for light client testing Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Add single test for light-client Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Wait for more blocks Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Use polkadot endpoint for testing Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust cargo check Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * examples: Remove light client chain connection example Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust cargo.toml section for the old example Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust background task to use usize for subscription Id Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Build bootnodes with serde_json::Value directly Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Make channel between subxt user and subxt background unbounded Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Update subxt/src/client/lightclient/builder.rs Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com> * Switch to smoldot 0.6.0 from 0.5.0 Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Move testing to `full_client` and `light_client` higher modules Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Remove subscriptionID type Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Remove subxt/integration-testing feature flag Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust wait_for_blocks documentation Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Adjust utils import for testing Signed-off-by: Alexandru Vasile 
<alexandru.vasile@parity.io> * Remove into_iter from builder construction Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> --------- Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com>
This commit is contained in:
@@ -0,0 +1,181 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use crate::{test_context, utils::node_runtime};
|
||||
use codec::{Compact, Encode};
|
||||
use futures::StreamExt;
|
||||
use subxt::blocks::BlocksClient;
|
||||
use subxt_metadata::Metadata;
|
||||
use subxt_signer::sr25519::dev;
|
||||
|
||||
// Check that we can subscribe to non-finalized (best) blocks.
#[tokio::test]
async fn non_finalized_headers_subscription() -> Result<(), subxt::Error> {
    let ctx = test_context().await;
    let api = ctx.client();

    // Subscribe to "best" (not-necessarily-finalized) block headers.
    let mut sub = api.blocks().subscribe_best().await?;

    // Wait for the next header from the subscription, and check that its
    // hash matches the node's current best block hash (`block_hash(None)`
    // returns the latest block's hash).
    // NOTE(review): there is an inherent race here if a new block is authored
    // between the two calls; presumably block time is long enough in practice.
    let header = sub.next().await.unwrap()?;
    let block_hash = header.hash();
    let current_block_hash = api.rpc().block_hash(None).await?.unwrap();

    assert_eq!(block_hash, current_block_hash);
    Ok(())
}
|
||||
|
||||
// Check that we can subscribe to finalized blocks.
#[tokio::test]
async fn finalized_headers_subscription() -> Result<(), subxt::Error> {
    let ctx = test_context().await;
    let api = ctx.client();

    // Subscribe to finalized block headers only.
    let mut sub = api.blocks().subscribe_finalized().await?;

    // Wait for the next set of headers, and check that the
    // associated block hash is the one we just finalized.
    // (this can be a bit slow as we have to wait for finalization)
    let header = sub.next().await.unwrap()?;
    let finalized_hash = api.rpc().finalized_head().await?;

    assert_eq!(header.hash(), finalized_hash);
    Ok(())
}
|
||||
|
||||
// Check that `subscribe_to_block_headers_filling_in_gaps` backfills headers
// that a finalized-header subscription skipped over, yielding a contiguous,
// strictly-ascending sequence of block numbers.
#[tokio::test]
async fn missing_block_headers_will_be_filled_in() -> Result<(), subxt::Error> {
    let ctx = test_context().await;
    let api = ctx.client();

    // Manually subscribe to the next 6 finalized block headers, but deliberately
    // filter out some in the middle so we get back b _ _ b _ b. This guarantees
    // that there will be some gaps, even if there aren't any from the subscription.
    let some_finalized_blocks = api
        .rpc()
        .subscribe_finalized_block_headers()
        .await?
        .enumerate()
        .take(6)
        // `StreamExt::filter` takes an async predicate; copy the index out of
        // the reference before moving it into the future.
        .filter(|(n, _)| {
            let n = *n;
            async move { n == 0 || n == 3 || n == 5 }
        })
        .map(|(_, h)| h);

    // This should spot any gaps in the middle and fill them back in.
    let all_finalized_blocks = subxt::blocks::subscribe_to_block_headers_filling_in_gaps(
        ctx.client(),
        None,
        some_finalized_blocks,
    );
    // The stream must be pinned before it can be polled with `next()`.
    futures::pin_mut!(all_finalized_blocks);

    // Iterate the block headers, making sure we get them all in order.
    let mut last_block_number = None;
    while let Some(header) = all_finalized_blocks.next().await {
        let header = header?;

        use subxt::config::Header;
        let block_number: u128 = header.number().into();

        // Each block number must follow directly on from the previous one:
        // any gap here means the backfilling failed.
        if let Some(last) = last_block_number {
            assert_eq!(last + 1, block_number);
        }
        last_block_number = Some(block_number);
    }

    // Guard against the vacuous case where the stream yielded nothing at all.
    assert!(last_block_number.is_some());
    Ok(())
}
|
||||
|
||||
// Check that a raw runtime API call (`state_call`) through a block's
// `runtime_api()` handle returns the same metadata as the legacy
// `state_getMetadata` RPC.
#[tokio::test]
async fn runtime_api_call() -> Result<(), subxt::Error> {
    let ctx = test_context().await;
    let api = ctx.client();

    let mut sub = api.blocks().subscribe_best().await?;

    // Take the first best block and obtain a runtime-API handle at it.
    let block = sub.next().await.unwrap()?;
    let rt = block.runtime_api().await?;

    // get metadata via state_call. The runtime returns a SCALE-encoded
    // (Compact<u32> length prefix, Metadata) pair; the prefix is discarded.
    let (_, meta1) = rt
        .call_raw::<(Compact<u32>, Metadata)>("Metadata_metadata", None)
        .await?;

    // get metadata via `state_getMetadata`.
    let meta2 = api.rpc().metadata_legacy(None).await?;

    // They should be the same.
    assert_eq!(meta1.encode(), meta2.encode());

    Ok(())
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn decode_extrinsics() {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice = dev::alice();
|
||||
let bob = dev::bob();
|
||||
|
||||
// Generate a block that has unsigned and signed transactions.
|
||||
let tx = node_runtime::tx()
|
||||
.balances()
|
||||
.transfer(bob.public_key().into(), 10_000);
|
||||
|
||||
let signed_extrinsic = api
|
||||
.tx()
|
||||
.create_signed(&tx, &alice, Default::default())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let in_block = signed_extrinsic
|
||||
.submit_and_watch()
|
||||
.await
|
||||
.unwrap()
|
||||
.wait_for_in_block()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let block_hash = in_block.block_hash();
|
||||
|
||||
let block = BlocksClient::new(api).at(block_hash).await.unwrap();
|
||||
let extrinsics = block.body().await.unwrap().extrinsics();
|
||||
assert_eq!(extrinsics.len(), 2);
|
||||
assert_eq!(extrinsics.block_hash(), block_hash);
|
||||
|
||||
assert!(extrinsics
|
||||
.has::<node_runtime::balances::calls::types::Transfer>()
|
||||
.unwrap());
|
||||
|
||||
assert!(extrinsics
|
||||
.find_first::<node_runtime::balances::calls::types::Transfer>()
|
||||
.unwrap()
|
||||
.is_some());
|
||||
|
||||
let block_extrinsics = extrinsics
|
||||
.iter()
|
||||
.map(|res| res.unwrap())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert_eq!(block_extrinsics.len(), 2);
|
||||
let timestamp = block_extrinsics.get(0).unwrap();
|
||||
timestamp.as_root_extrinsic::<node_runtime::Call>().unwrap();
|
||||
timestamp
|
||||
.as_extrinsic::<node_runtime::timestamp::calls::types::Set>()
|
||||
.unwrap();
|
||||
assert!(!timestamp.is_signed());
|
||||
|
||||
let tx = block_extrinsics.get(1).unwrap();
|
||||
tx.as_root_extrinsic::<node_runtime::Call>().unwrap();
|
||||
tx.as_extrinsic::<node_runtime::balances::calls::types::Transfer>()
|
||||
.unwrap();
|
||||
assert!(tx.is_signed());
|
||||
}
|
||||
@@ -0,0 +1,681 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use crate::{
|
||||
test_context, test_context_with,
|
||||
utils::{node_runtime, wait_for_blocks},
|
||||
};
|
||||
use assert_matches::assert_matches;
|
||||
use codec::{Compact, Decode, Encode};
|
||||
use sp_core::storage::well_known_keys;
|
||||
use subxt::{
|
||||
error::{DispatchError, Error, TokenError},
|
||||
rpc::types::{
|
||||
ChainHeadEvent, DryRunResult, DryRunResultBytes, FollowEvent, Initialized, RuntimeEvent,
|
||||
RuntimeVersionEvent,
|
||||
},
|
||||
utils::AccountId32,
|
||||
};
|
||||
use subxt_metadata::Metadata;
|
||||
use subxt_signer::sr25519::dev;
|
||||
|
||||
#[tokio::test]
|
||||
async fn insert_key() {
|
||||
let ctx = test_context_with("bob".to_string()).await;
|
||||
let api = ctx.client();
|
||||
|
||||
let public = dev::alice().public_key().as_ref().to_vec();
|
||||
api.rpc()
|
||||
.insert_key(
|
||||
"aura".to_string(),
|
||||
"//Alice".to_string(),
|
||||
public.clone().into(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(api
|
||||
.rpc()
|
||||
.has_key(public.clone().into(), "aura".to_string())
|
||||
.await
|
||||
.unwrap());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn fetch_block_hash() {
|
||||
let ctx = test_context().await;
|
||||
ctx.client().rpc().block_hash(None).await.unwrap();
|
||||
}
|
||||
|
||||
// Smoke test: fetch the latest block hash, then fetch the full block at it.
#[tokio::test]
async fn fetch_block() {
    let ctx = test_context().await;
    let api = ctx.client();

    let block_hash = api.rpc().block_hash(None).await.unwrap();
    api.rpc().block(block_hash).await.unwrap();
}
|
||||
|
||||
// Smoke test: request a storage read proof for a couple of well-known
// storage keys at the latest block.
#[tokio::test]
async fn fetch_read_proof() {
    let ctx = test_context().await;
    let api = ctx.client();

    let block_hash = api.rpc().block_hash(None).await.unwrap();
    api.rpc()
        .read_proof(
            vec![
                well_known_keys::HEAP_PAGES,
                well_known_keys::EXTRINSIC_INDEX,
            ],
            block_hash,
        )
        .await
        .unwrap();
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn chain_subscribe_all_blocks() {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let mut blocks = api.rpc().subscribe_all_block_headers().await.unwrap();
|
||||
blocks.next().await.unwrap().unwrap();
|
||||
}
|
||||
|
||||
// Smoke test: subscribing to best block headers yields at least one item.
#[tokio::test]
async fn chain_subscribe_best_blocks() {
    let ctx = test_context().await;
    let api = ctx.client();

    let mut blocks = api.rpc().subscribe_best_block_headers().await.unwrap();
    blocks.next().await.unwrap().unwrap();
}
|
||||
|
||||
// Smoke test: subscribing to finalized block headers yields at least one item.
#[tokio::test]
async fn chain_subscribe_finalized_blocks() {
    let ctx = test_context().await;
    let api = ctx.client();

    let mut blocks = api.rpc().subscribe_finalized_block_headers().await.unwrap();
    blocks.next().await.unwrap().unwrap();
}
|
||||
|
||||
// Fetch a page of storage keys under the System.Account map and check that
// the requested page size is honoured.
#[tokio::test]
async fn fetch_keys() {
    let ctx = test_context().await;
    let api = ctx.client();

    let addr = node_runtime::storage().system().account_root();
    let keys = api
        .storage()
        .at_latest()
        .await
        .unwrap()
        // Ask for at most 4 keys under the map's root prefix, no start key.
        .fetch_keys(&addr.to_root_bytes(), 4, None)
        .await
        .unwrap();
    // The dev chain has more than 4 accounts, so a full page comes back.
    assert_eq!(keys.len(), 4)
}
|
||||
|
||||
// Iterate all entries of the System.Account storage map and count them.
#[tokio::test]
async fn test_iter() {
    let ctx = test_context().await;
    let api = ctx.client();

    let addr = node_runtime::storage().system().account_root();
    let mut iter = api
        .storage()
        .at_latest()
        .await
        .unwrap()
        // Page size 10; the iterator fetches further pages as needed.
        .iter(addr, 10)
        .await
        .unwrap();
    let mut i = 0;
    while iter.next().await.unwrap().is_some() {
        i += 1;
    }
    // 13 = the number of accounts in the dev chain's genesis config,
    // presumably (well-known dev accounts) — TODO confirm against the
    // substrate dev chain spec if this ever changes.
    assert_eq!(i, 13);
}
|
||||
|
||||
// Check the system_* RPCs report the expected dev-node identity strings.
#[tokio::test]
async fn fetch_system_info() {
    let ctx = test_context().await;
    let api = ctx.client();

    assert_eq!(api.rpc().system_chain().await.unwrap(), "Development");
    assert_eq!(api.rpc().system_name().await.unwrap(), "Substrate Node");
    // Version string varies between node builds; just require it is non-empty.
    assert!(!api.rpc().system_version().await.unwrap().is_empty());
}
|
||||
|
||||
// A valid transfer should pass a dry run and then also succeed when
// actually submitted.
#[tokio::test]
async fn dry_run_passes() {
    let ctx = test_context().await;
    let api = ctx.client();

    let alice = dev::alice();
    let bob = dev::bob();

    // Make sure the chain is producing blocks before submitting anything.
    wait_for_blocks(&api).await;

    let tx = node_runtime::tx()
        .balances()
        .transfer(bob.public_key().into(), 10_000);

    let signed_extrinsic = api
        .tx()
        .create_signed(&tx, &alice, Default::default())
        .await
        .unwrap();

    // Dry-run against the latest block; any dispatch error would surface here.
    signed_extrinsic
        .dry_run(None)
        .await
        .expect("dryrunning failed");

    // Now submit for real and wait for finalized success.
    signed_extrinsic
        .submit_and_watch()
        .await
        .unwrap()
        .wait_for_finalized_success()
        .await
        .unwrap();
}
|
||||
|
||||
// A transfer exceeding Alice's balance should fail the dry run with
// FundsUnavailable, and also fail identically when actually submitted.
#[tokio::test]
async fn dry_run_fails() {
    let ctx = test_context().await;
    let api = ctx.client();

    // Make sure the chain is producing blocks before submitting anything.
    wait_for_blocks(&api).await;

    let alice = dev::alice();
    let bob = dev::bob();

    let tx = node_runtime::tx().balances().transfer(
        bob.public_key().into(),
        // 7 more than the default amount Alice has, so this should fail; insufficient funds:
        1_000_000_000_000_000_000_007,
    );

    let signed_extrinsic = api
        .tx()
        .create_signed(&tx, &alice, Default::default())
        .await
        .unwrap();

    // The dry run itself succeeds (the RPC worked); the *result* it carries
    // is the dispatch error.
    let dry_run_res = signed_extrinsic
        .dry_run(None)
        .await
        .expect("dryrunning failed");

    assert_eq!(
        dry_run_res,
        DryRunResult::DispatchError(DispatchError::Token(TokenError::FundsUnavailable))
    );

    // Submitting for real should produce the same token error.
    let res = signed_extrinsic
        .submit_and_watch()
        .await
        .unwrap()
        .wait_for_finalized_success()
        .await;

    assert!(
        matches!(
            res,
            Err(Error::Runtime(DispatchError::Token(
                TokenError::FundsUnavailable
            )))
        ),
        "Expected an insufficient balance, got {res:?}"
    );
}
|
||||
|
||||
// Check that subxt's `DryRunResult` decoding agrees with Substrate's own
// `ApplyExtrinsicResult` SCALE encoding for a range of outcomes.
#[tokio::test]
async fn dry_run_result_is_substrate_compatible() {
    use sp_runtime::{
        transaction_validity::{
            InvalidTransaction as SpInvalidTransaction,
            TransactionValidityError as SpTransactionValidityError,
        },
        ApplyExtrinsicResult as SpApplyExtrinsicResult, DispatchError as SpDispatchError,
        TokenError as SpTokenError,
    };

    // We really just connect to a node to get some valid metadata to help us
    // decode Dispatch Errors.
    let ctx = test_context().await;
    let api = ctx.client();

    // Pairs of (Substrate-side result to encode, subxt-side result expected
    // after decoding those bytes).
    let pairs = vec![
        // All ok
        (SpApplyExtrinsicResult::Ok(Ok(())), DryRunResult::Success),
        // Some transaction error
        (
            SpApplyExtrinsicResult::Err(SpTransactionValidityError::Invalid(
                SpInvalidTransaction::BadProof,
            )),
            DryRunResult::TransactionValidityError,
        ),
        // Some dispatch errors to check that they decode OK. We've tested module errors
        // "in situ" in other places so avoid the complexity of testing them properly here.
        (
            SpApplyExtrinsicResult::Ok(Err(SpDispatchError::Other("hi"))),
            DryRunResult::DispatchError(DispatchError::Other),
        ),
        (
            SpApplyExtrinsicResult::Ok(Err(SpDispatchError::CannotLookup)),
            DryRunResult::DispatchError(DispatchError::CannotLookup),
        ),
        (
            SpApplyExtrinsicResult::Ok(Err(SpDispatchError::BadOrigin)),
            DryRunResult::DispatchError(DispatchError::BadOrigin),
        ),
        (
            SpApplyExtrinsicResult::Ok(Err(SpDispatchError::Token(SpTokenError::CannotCreate))),
            DryRunResult::DispatchError(DispatchError::Token(TokenError::CannotCreate)),
        ),
    ];

    // Round-trip: SCALE-encode the Substrate result, then decode it through
    // subxt's `DryRunResultBytes` using the live node's metadata.
    for (actual, expected) in pairs {
        let encoded = actual.encode();
        let res = DryRunResultBytes(encoded)
            .into_dry_run_result(&api.metadata())
            .unwrap();

        assert_eq!(res, expected);
    }
}
|
||||
|
||||
// Build a transaction in two phases — obtain the signer payload, sign it
// "externally", then attach the signature — and submit the result.
#[tokio::test]
async fn external_signing() {
    let ctx = test_context().await;
    let api = ctx.client();
    let alice = dev::alice();

    // Create a partial extrinsic. We can get the signer payload at this point, to be
    // signed externally.
    let tx = node_runtime::tx().preimage().note_preimage(vec![0u8]);
    let partial_extrinsic = api
        .tx()
        .create_partial_signed(&tx, &alice.public_key().into(), Default::default())
        .await
        .unwrap();

    // Get the signer payload.
    let signer_payload = partial_extrinsic.signer_payload();
    // Sign it (possibly externally).
    let signature = alice.sign(&signer_payload);
    // Use this to build a signed extrinsic.
    let extrinsic = partial_extrinsic
        .sign_with_address_and_signature(&alice.public_key().into(), &signature.into());

    // And now submit it.
    extrinsic
        .submit_and_watch()
        .await
        .unwrap()
        .wait_for_finalized_success()
        .await
        .unwrap();
}
|
||||
|
||||
// Check that a large (2 MiB payload) extrinsic can be submitted and finalized.
#[tokio::test]
async fn submit_large_extrinsic() {
    let ctx = test_context().await;
    let api = ctx.client();

    let alice = dev::alice();

    // 2 MiB blob of data.
    let bytes = vec![0_u8; 2 * 1024 * 1024];
    // The preimage pallet allows storing and managing large byte-blobs.
    let tx = node_runtime::tx().preimage().note_preimage(bytes);

    let signed_extrinsic = api
        .tx()
        .create_signed(&tx, &alice, Default::default())
        .await
        .unwrap();

    signed_extrinsic
        .submit_and_watch()
        .await
        .unwrap()
        .wait_for_finalized_success()
        .await
        .unwrap();
}
|
||||
|
||||
// Trigger a known module error on-chain and check it decodes into the
// statically generated error type.
#[tokio::test]
async fn decode_a_module_error() {
    use node_runtime::runtime_types::pallet_assets::pallet as assets;

    let ctx = test_context().await;
    let api = ctx.client();

    let alice = dev::alice();
    let alice_addr = alice.public_key().into();

    // Trying to work with an asset ID 1 which doesn't exist should return an
    // "unknown" module error from the assets pallet.
    let freeze_unknown_asset = node_runtime::tx().assets().freeze(1, alice_addr);

    let err = api
        .tx()
        .sign_and_submit_then_watch_default(&freeze_unknown_asset, &alice)
        .await
        .unwrap()
        .wait_for_finalized_success()
        .await
        .expect_err("an 'unknown asset' error");

    // The error must be a runtime dispatch error carrying module error details.
    let Error::Runtime(DispatchError::Module(module_err)) = err else {
        panic!("Expected a ModuleError, got {err:?}");
    };

    // Decode the error into our generated Error type.
    let decoded_err = module_err.as_root_error::<node_runtime::Error>().unwrap();

    // Decoding should result in an Assets.Unknown error:
    assert_eq!(
        decoded_err,
        node_runtime::Error::Assets(assets::Error::Unknown)
    );
}
|
||||
|
||||
// Check that subxt's unsigned-extrinsic encoding is byte-identical to the
// bytes produced by the polkadot.js UI for the same call.
#[tokio::test]
async fn unsigned_extrinsic_is_same_shape_as_polkadotjs() {
    let ctx = test_context().await;
    let api = ctx.client();

    let tx = node_runtime::tx()
        .balances()
        .transfer(dev::alice().public_key().into(), 12345000000000000);

    let actual_tx = api.tx().create_unsigned(&tx).unwrap();

    let actual_tx_bytes = actual_tx.encoded();

    // How these were obtained:
    // - start local substrate node.
    // - open polkadot.js UI in browser and point at local node.
    // - open dev console (may need to refresh page now) and find the WS connection.
    // - create a balances.transfer to ALICE with 12345 and "submit unsigned".
    // - find the submitAndWatchExtrinsic call in the WS connection to get these bytes:
    let expected_tx_bytes = hex::decode(
        "b004060700d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d0f0090c04bb6db2b"
    )
    .unwrap();

    // Make sure our encoding is the same as the encoding polkadot UI created.
    assert_eq!(actual_tx_bytes, expected_tx_bytes);
}
|
||||
|
||||
// Check that the `state_call` RPC returns the same metadata as the legacy
// `state_getMetadata` RPC.
#[tokio::test]
async fn rpc_state_call() -> Result<(), subxt::Error> {
    let ctx = test_context().await;
    let api = ctx.client();

    // get metadata via state_call. The runtime returns a SCALE-encoded
    // (Compact<u32> length prefix, Metadata) pair; the prefix is discarded.
    let (_, meta1) = api
        .rpc()
        .state_call::<(Compact<u32>, Metadata)>("Metadata_metadata", None, None)
        .await?;

    // get metadata via `state_getMetadata`.
    let meta2 = api.rpc().metadata_legacy(None).await?;

    // They should be the same.
    assert_eq!(meta1.encode(), meta2.encode());

    Ok(())
}
|
||||
|
||||
// Check the unstable `chainHead_follow` subscription: the first event must be
// `Initialized` with the current finalized hash, and runtime details must be
// present exactly when `with_runtime` is requested.
#[tokio::test]
async fn chainhead_unstable_follow() {
    let ctx = test_context().await;
    let api = ctx.client();

    // Check subscription with runtime updates set on false.
    let mut blocks = api.rpc().chainhead_unstable_follow(false).await.unwrap();
    let event = blocks.next().await.unwrap().unwrap();
    // The initialized event should contain the finalized block hash.
    let finalized_block_hash = api.rpc().finalized_head().await.unwrap();
    assert_eq!(
        event,
        FollowEvent::Initialized(Initialized {
            finalized_block_hash,
            // No runtime info requested, so none should be attached.
            finalized_block_runtime: None,
        })
    );

    // Expect subscription to produce runtime versions.
    let mut blocks = api.rpc().chainhead_unstable_follow(true).await.unwrap();
    let event = blocks.next().await.unwrap().unwrap();
    // The initialized event should contain the finalized block hash.
    let finalized_block_hash = api.rpc().finalized_head().await.unwrap();
    let runtime_version = ctx.client().runtime_version();

    assert_matches!(
        event,
        FollowEvent::Initialized(init) => {
            assert_eq!(init.finalized_block_hash, finalized_block_hash);
            assert_eq!(init.finalized_block_runtime, Some(RuntimeEvent::Valid(RuntimeVersionEvent {
                spec: runtime_version,
            })));
        }
    );
}
|
||||
|
||||
// Check that `chainHead_body` returns the finalized block's extrinsics,
// matching what the legacy `chain_getBlock` RPC reports for the same hash.
#[tokio::test]
async fn chainhead_unstable_body() {
    let ctx = test_context().await;
    let api = ctx.client();

    // Follow the chain to learn the current finalized block hash and the
    // follow-subscription id required by the other chainHead methods.
    let mut blocks = api.rpc().chainhead_unstable_follow(false).await.unwrap();
    let event = blocks.next().await.unwrap().unwrap();
    let hash = match event {
        FollowEvent::Initialized(init) => init.finalized_block_hash,
        _ => panic!("Unexpected event"),
    };
    let sub_id = blocks.subscription_id().unwrap().clone();

    // Subscribe to fetch the block's body.
    let mut sub = api
        .rpc()
        .chainhead_unstable_body(sub_id, hash)
        .await
        .unwrap();
    let event = sub.next().await.unwrap().unwrap();

    // Expected block's extrinsics scale encoded and hex encoded.
    let body = api.rpc().block(Some(hash)).await.unwrap().unwrap();
    let extrinsics: Vec<Vec<u8>> = body.block.extrinsics.into_iter().map(|ext| ext.0).collect();
    let expected = format!("0x{}", hex::encode(extrinsics.encode()));

    assert_matches!(event,
        ChainHeadEvent::Done(done) if done.result == expected
    );
}
|
||||
|
||||
// Check that `chainHead_header` returns the same (hex-encoded SCALE) header
// as the legacy `chain_getHeader` RPC for the finalized block.
#[tokio::test]
async fn chainhead_unstable_header() {
    let ctx = test_context().await;
    let api = ctx.client();

    // Follow the chain to learn the finalized hash and the subscription id.
    let mut blocks = api.rpc().chainhead_unstable_follow(false).await.unwrap();
    let event = blocks.next().await.unwrap().unwrap();
    let hash = match event {
        FollowEvent::Initialized(init) => init.finalized_block_hash,
        _ => panic!("Unexpected event"),
    };
    let sub_id = blocks.subscription_id().unwrap().clone();

    // Expected value from the legacy RPC, hex-encoded SCALE bytes.
    let header = api.rpc().header(Some(hash)).await.unwrap().unwrap();
    let expected = format!("0x{}", hex::encode(header.encode()));

    let header = api
        .rpc()
        .chainhead_unstable_header(sub_id, hash)
        .await
        .unwrap()
        .unwrap();

    assert_eq!(header, expected);
}
|
||||
|
||||
// Check that `chainHead_storage` can fetch Alice's System.Account entry
// at the finalized block.
#[tokio::test]
async fn chainhead_unstable_storage() {
    let ctx = test_context().await;
    let api = ctx.client();

    // Follow the chain to learn the finalized hash and the subscription id.
    let mut blocks = api.rpc().chainhead_unstable_follow(false).await.unwrap();
    let event = blocks.next().await.unwrap().unwrap();
    let hash = match event {
        FollowEvent::Initialized(init) => init.finalized_block_hash,
        _ => panic!("Unexpected event"),
    };
    let sub_id = blocks.subscription_id().unwrap().clone();

    // Build the raw storage key for Alice's System.Account entry.
    let alice: AccountId32 = dev::alice().public_key().into();
    let addr = node_runtime::storage().system().account(alice);
    let addr_bytes = api.storage().address_bytes(&addr).unwrap();

    let mut sub = api
        .rpc()
        .chainhead_unstable_storage(sub_id, hash, &addr_bytes, None)
        .await
        .unwrap();
    let event = sub.next().await.unwrap().unwrap();

    // Alice exists on the dev chain, so a value must come back.
    assert_matches!(event, ChainHeadEvent::<Option<String>>::Done(done) if done.result.is_some());
}
|
||||
|
||||
/// `chainHead_unstable_call` should execute a runtime API call
/// (`AccountNonceApi_account_nonce`) at a pinned block and report `Done`.
/// Note: `follow(true)` is required — runtime calls need runtime updates enabled.
#[tokio::test]
async fn chainhead_unstable_call() {
    let ctx = test_context().await;
    let api = ctx.client();

    // Follow with runtime updates enabled and grab the finalized block + sub id.
    let mut blocks = api.rpc().chainhead_unstable_follow(true).await.unwrap();
    let event = blocks.next().await.unwrap().unwrap();
    let hash = match event {
        FollowEvent::Initialized(init) => init.finalized_block_hash,
        _ => panic!("Unexpected event"),
    };
    let sub_id = blocks.subscription_id().unwrap().clone();

    // Call the nonce runtime API with Alice's SCALE-encoded account id as argument.
    let alice_id = dev::alice().public_key().to_account_id();
    let mut sub = api
        .rpc()
        .chainhead_unstable_call(
            sub_id,
            hash,
            "AccountNonceApi_account_nonce".into(),
            &alice_id.encode(),
        )
        .await
        .unwrap();
    let event = sub.next().await.unwrap().unwrap();

    // Only check that the call completes; the nonce value itself is not asserted.
    assert_matches!(event, ChainHeadEvent::<String>::Done(_));
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn chainhead_unstable_unpin() {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let mut blocks = api.rpc().chainhead_unstable_follow(true).await.unwrap();
|
||||
let event = blocks.next().await.unwrap().unwrap();
|
||||
let hash = match event {
|
||||
FollowEvent::Initialized(init) => init.finalized_block_hash,
|
||||
_ => panic!("Unexpected event"),
|
||||
};
|
||||
let sub_id = blocks.subscription_id().unwrap().clone();
|
||||
|
||||
assert!(api
|
||||
.rpc()
|
||||
.chainhead_unstable_unpin(sub_id.clone(), hash)
|
||||
.await
|
||||
.is_ok());
|
||||
// The block was already unpinned.
|
||||
assert!(api
|
||||
.rpc()
|
||||
.chainhead_unstable_unpin(sub_id, hash)
|
||||
.await
|
||||
.is_err());
|
||||
}
|
||||
|
||||
/// Local mirror of the node's fee-details type, used to decode the result of the
/// `TransactionPaymentApi_query_fee_details` runtime call below.
/// Taken from the original type
/// <https://docs.rs/pallet-transaction-payment/latest/pallet_transaction_payment/struct.FeeDetails.html>
#[derive(Encode, Decode, Debug, Clone, Eq, PartialEq)]
pub struct FeeDetails {
    /// The minimum fee for a transaction to be included in a block.
    pub inclusion_fee: Option<InclusionFee>,
    /// Tip attached to the transaction, if any.
    pub tip: u128,
}
|
||||
|
||||
/// Local mirror of the node's inclusion-fee type; taken from the original type
/// <https://docs.rs/pallet-transaction-payment/latest/pallet_transaction_payment/struct.InclusionFee.html>
/// The base fee and adjusted weight and length fees constitute the _inclusion fee_.
#[derive(Encode, Decode, Debug, Clone, Eq, PartialEq)]
pub struct InclusionFee {
    /// Minimum amount a user pays for a transaction.
    pub base_fee: u128,
    /// Amount paid for the encoded length (in bytes) of the transaction.
    pub len_fee: u128,
    /// The weight-based portion of the fee, where:
    ///
    /// - `targeted_fee_adjustment`: This is a multiplier that can tune the final fee based on the
    ///   congestion of the network.
    /// - `weight_fee`: This amount is computed based on the weight of the transaction. Weight
    ///   accounts for the execution time of a transaction.
    ///
    /// adjusted_weight_fee = targeted_fee_adjustment * weight_fee
    pub adjusted_weight_fee: u128,
}
|
||||
|
||||
/// The fee estimated via `partial_fee_estimate()` (query_info) must equal the
/// fee recomputed from the parts returned by `query_fee_details`
/// (base + length + adjusted weight).
#[tokio::test]
async fn partial_fee_estimate_correct() {
    let ctx = test_context().await;
    let api = ctx.client();

    let alice = dev::alice();
    let bob = dev::bob();
    let tx = node_runtime::tx()
        .balances()
        .transfer(bob.public_key().into(), 1_000_000_000_000);

    // Sign but do not submit — fee estimation works on the signed bytes.
    let signed_extrinsic = api
        .tx()
        .create_signed(&tx, &alice, Default::default())
        .await
        .unwrap();

    // Method I: TransactionPaymentApi_query_info
    let partial_fee_1 = signed_extrinsic.partial_fee_estimate().await.unwrap();

    // Method II: TransactionPaymentApi_query_fee_details + calculations
    // The runtime call expects the extrinsic bytes followed by their length as a
    // little-endian u32.
    let len_bytes: [u8; 4] = (signed_extrinsic.encoded().len() as u32).to_le_bytes();
    let encoded_with_len = [signed_extrinsic.encoded(), &len_bytes[..]].concat();
    let InclusionFee {
        base_fee,
        len_fee,
        adjusted_weight_fee,
    } = api
        .rpc()
        .state_call::<FeeDetails>(
            "TransactionPaymentApi_query_fee_details",
            Some(&encoded_with_len),
            None,
        )
        .await
        .unwrap()
        .inclusion_fee
        .unwrap();
    let partial_fee_2 = base_fee + len_fee + adjusted_weight_fee;

    // Both methods should yield the same fee
    assert_eq!(partial_fee_1, partial_fee_2);
}
|
||||
@@ -0,0 +1,219 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use frame_metadata::{
|
||||
v15::{ExtrinsicMetadata, RuntimeMetadataV15},
|
||||
RuntimeMetadataPrefixed,
|
||||
};
|
||||
use scale_info::{meta_type, IntoPortable, PortableRegistry, Registry, TypeInfo};
|
||||
use subxt_codegen::{CratePath, DerivesRegistry, RuntimeGenerator, TypeSubstitutes};
|
||||
use syn::__private::quote;
|
||||
|
||||
fn generate_runtime_interface_from_metadata(metadata: RuntimeMetadataPrefixed) -> String {
|
||||
// Generate a runtime interface from the provided metadata.
|
||||
let metadata = metadata
|
||||
.try_into()
|
||||
.expect("frame_metadata should be convertible into Metadata");
|
||||
let generator = RuntimeGenerator::new(metadata);
|
||||
let item_mod = syn::parse_quote!(
|
||||
pub mod api {}
|
||||
);
|
||||
let crate_path = CratePath::default();
|
||||
let derives = DerivesRegistry::with_default_derives(&crate_path);
|
||||
let type_substitutes = TypeSubstitutes::with_default_substitutes(&crate_path);
|
||||
generator
|
||||
.generate_runtime(item_mod, derives, type_substitutes, crate_path, false)
|
||||
.expect("API generation must be valid")
|
||||
.to_string()
|
||||
}
|
||||
|
||||
/// Build a minimal V15 metadata blob around a caller-populated type registry
/// and run codegen over it. The closure `f` is invoked after the mandatory
/// runtime types are registered, so tests can inject extra types.
fn generate_runtime_interface_with_type_registry<F>(f: F) -> String
where
    F: Fn(&mut scale_info::Registry),
{
    // Stand-in runtime types — empty, but codegen requires their presence.
    #[derive(TypeInfo)]
    struct Runtime;
    #[derive(TypeInfo)]
    enum RuntimeCall {}
    #[derive(TypeInfo)]
    enum RuntimeEvent {}
    #[derive(TypeInfo)]
    pub enum DispatchError {}

    // We need these types for codegen to work:
    // NOTE: registration order fixes the portable type IDs; `ty` (the Runtime
    // type id) is captured from the first registration and reused below.
    let mut registry = scale_info::Registry::new();
    let ty = registry.register_type(&meta_type::<Runtime>());
    registry.register_type(&meta_type::<RuntimeCall>());
    registry.register_type(&meta_type::<RuntimeEvent>());
    registry.register_type(&meta_type::<DispatchError>());

    // Allow custom types to be added for testing:
    f(&mut registry);

    // Minimal extrinsic metadata (unit type, no signed extensions).
    let extrinsic = ExtrinsicMetadata {
        ty: meta_type::<()>(),
        version: 0,
        signed_extensions: vec![],
    }
    .into_portable(&mut registry);
    let metadata = RuntimeMetadataV15 {
        types: registry.into(),
        pallets: Vec::new(),
        extrinsic,
        ty,
        apis: vec![],
    };

    let metadata = RuntimeMetadataPrefixed::from(metadata);
    generate_runtime_interface_from_metadata(metadata)
}
|
||||
|
||||
/// Two distinct types sharing the same path ("dupe_mod::DuplicateType") must
/// both survive codegen: the second one is disambiguated with a `2` suffix
/// rather than silently overwriting the first.
#[test]
fn dupe_types_do_not_overwrite_each_other() {
    let interface = generate_runtime_interface_with_type_registry(|registry| {
        // Now we duplicate some types with same type info. We need two unique types here,
        // and can't just add one type to the registry twice, because the registry knows if
        // type IDs are the same.
        enum Foo {}
        impl TypeInfo for Foo {
            type Identity = Self;
            fn type_info() -> scale_info::Type {
                scale_info::Type::builder()
                    .path(scale_info::Path::new("DuplicateType", "dupe_mod"))
                    .variant(
                        scale_info::build::Variants::new()
                            .variant("FirstDupeTypeVariant", |builder| builder.index(0)),
                    )
            }
        }
        enum Bar {}
        impl TypeInfo for Bar {
            type Identity = Self;
            fn type_info() -> scale_info::Type {
                scale_info::Type::builder()
                    .path(scale_info::Path::new("DuplicateType", "dupe_mod"))
                    .variant(
                        scale_info::build::Variants::new()
                            .variant("SecondDupeTypeVariant", |builder| builder.index(0)),
                    )
            }
        }

        registry.register_type(&meta_type::<Foo>());
        registry.register_type(&meta_type::<Bar>());
    });

    // First type keeps its name and variant...
    assert!(interface.contains("DuplicateType"));
    assert!(interface.contains("FirstDupeTypeVariant"));

    // ...second type is renamed with a numeric suffix, variant intact.
    assert!(interface.contains("DuplicateType2"));
    assert!(interface.contains("SecondDupeTypeVariant"));
}
|
||||
|
||||
/// When two same-path types carry generic parameters, codegen emits a single
/// definition (no `DuplicateType2`), relying on the generic parameter to
/// disambiguate concrete uses.
#[test]
fn generic_types_overwrite_each_other() {
    let interface = generate_runtime_interface_with_type_registry(|registry| {
        // If we have two types mentioned in the registry that have generic params,
        // only one type will be output (the codegen assumes that the generic param will disambiguate)
        enum Foo {}
        impl TypeInfo for Foo {
            type Identity = Self;
            fn type_info() -> scale_info::Type {
                scale_info::Type::builder()
                    .path(scale_info::Path::new("DuplicateType", "dupe_mod"))
                    .type_params([scale_info::TypeParameter::new("T", Some(meta_type::<u8>()))])
                    .variant(scale_info::build::Variants::new())
            }
        }
        enum Bar {}
        impl TypeInfo for Bar {
            type Identity = Self;
            fn type_info() -> scale_info::Type {
                scale_info::Type::builder()
                    .path(scale_info::Path::new("DuplicateType", "dupe_mod"))
                    .type_params([scale_info::TypeParameter::new("T", Some(meta_type::<u8>()))])
                    .variant(scale_info::build::Variants::new())
            }
        }

        registry.register_type(&meta_type::<Foo>());
        registry.register_type(&meta_type::<Bar>());
    });

    assert!(interface.contains("DuplicateType"));
    // We do _not_ expect this to exist, since a generic is present on the type:
    assert!(!interface.contains("DuplicateType2"));
}
|
||||
|
||||
/// Structs with several generic parameters must be generated with positional
/// type parameters (`_0.._3`) and fully-qualified concrete arguments at each
/// use site; the generated module tree is compared token-for-token.
#[test]
fn more_than_1_generic_parameters_work() {
    #[allow(unused)]
    #[derive(TypeInfo)]
    struct Foo<T, U, V, W> {
        a: T,
        b: U,
        c: V,
        d: W,
    }

    #[allow(unused)]
    #[derive(TypeInfo)]
    struct Bar {
        p: Foo<u32, u32, u64, u128>,
        q: Foo<u8, u8, u8, u8>,
    }

    // Registering `Bar` pulls in both concrete instantiations of `Foo`.
    let mut registry = Registry::new();
    registry.register_type(&meta_type::<Bar>());
    let portable_types: PortableRegistry = registry.into();

    let type_gen = subxt_codegen::TypeGenerator::new(
        &portable_types,
        "root",
        Default::default(),
        Default::default(),
        CratePath::default(),
        false,
    );

    let types = type_gen.generate_types_mod().unwrap();
    let generated_mod = quote::quote!( #types);

    // Expected output: module path mirrors this test module's Rust path, and
    // `Foo` collapses to one generic definition with positional parameters.
    let expected_mod = quote::quote! {
        pub mod root {
            use super::root;
            pub mod integration_tests {
                use super::root;
                pub mod codegen {
                    use super::root;
                    pub mod codegen_tests {
                        use super::root;
                        pub struct Bar {
                            pub p: root::integration_tests::codegen::codegen_tests::Foo<
                                ::core::primitive::u32,
                                ::core::primitive::u32,
                                ::core::primitive::u64,
                                ::core::primitive::u128
                            >,
                            pub q: root::integration_tests::codegen::codegen_tests::Foo<
                                ::core::primitive::u8,
                                ::core::primitive::u8,
                                ::core::primitive::u8,
                                ::core::primitive::u8
                            >,
                        }
                        pub struct Foo<_0, _1, _2, _3> {
                            pub a: _0,
                            pub b: _1,
                            pub c: _2,
                            pub d: _3,
                        }
                    }
                }
            }
        }
    };

    assert_eq!(generated_mod.to_string(), expected_mod.to_string());
}
|
||||
@@ -0,0 +1,176 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use codec::Decode;
|
||||
use regex::Regex;
|
||||
use subxt_codegen::{CratePath, DerivesRegistry, RuntimeGenerator, TypeSubstitutes};
|
||||
use subxt_metadata::Metadata;
|
||||
|
||||
fn load_test_metadata() -> Metadata {
|
||||
let bytes = test_runtime::METADATA;
|
||||
Metadata::decode(&mut &*bytes).expect("Cannot decode scale metadata")
|
||||
}
|
||||
|
||||
fn metadata_docs() -> Vec<String> {
|
||||
// Load the runtime metadata downloaded from a node via `test-runtime`.
|
||||
let metadata = load_test_metadata();
|
||||
|
||||
// Inspect the metadata types and collect the documentation.
|
||||
let mut docs = Vec::new();
|
||||
for ty in &metadata.types().types {
|
||||
docs.extend_from_slice(&ty.ty.docs);
|
||||
}
|
||||
|
||||
for pallet in metadata.pallets() {
|
||||
if let Some(storage) = pallet.storage() {
|
||||
for entry in storage.entries() {
|
||||
docs.extend_from_slice(entry.docs());
|
||||
}
|
||||
}
|
||||
// Note: Calls, Events and Errors are deduced directly to
|
||||
// PortableTypes which are handled above.
|
||||
for constant in pallet.constants() {
|
||||
docs.extend_from_slice(constant.docs());
|
||||
}
|
||||
}
|
||||
// Note: Extrinsics do not have associated documentation, but is implied by
|
||||
// associated Type.
|
||||
|
||||
// Inspect the runtime API types and collect the documentation.
|
||||
for api in metadata.runtime_api_traits() {
|
||||
docs.extend_from_slice(api.docs());
|
||||
for method in api.methods() {
|
||||
docs.extend_from_slice(method.docs());
|
||||
}
|
||||
}
|
||||
|
||||
docs
|
||||
}
|
||||
|
||||
fn generate_runtime_interface(crate_path: CratePath, should_gen_docs: bool) -> String {
|
||||
// Load the runtime metadata downloaded from a node via `test-runtime`.
|
||||
let metadata = load_test_metadata();
|
||||
|
||||
// Generate a runtime interface from the provided metadata.
|
||||
let generator = RuntimeGenerator::new(metadata);
|
||||
let item_mod = syn::parse_quote!(
|
||||
pub mod api {}
|
||||
);
|
||||
let derives = DerivesRegistry::with_default_derives(&crate_path);
|
||||
let type_substitutes = TypeSubstitutes::with_default_substitutes(&crate_path);
|
||||
generator
|
||||
.generate_runtime(
|
||||
item_mod,
|
||||
derives,
|
||||
type_substitutes,
|
||||
crate_path,
|
||||
should_gen_docs,
|
||||
)
|
||||
.expect("API generation must be valid")
|
||||
.to_string()
|
||||
}
|
||||
|
||||
fn interface_docs(should_gen_docs: bool) -> Vec<String> {
|
||||
// Generate the runtime interface from the node's metadata.
|
||||
// Note: the API is generated on a single line.
|
||||
let runtime_api = generate_runtime_interface(CratePath::default(), should_gen_docs);
|
||||
|
||||
// Documentation lines have the following format:
|
||||
// # [ doc = "Upward message is invalid XCM."]
|
||||
// Given the API is generated on a single line, the regex matching
|
||||
// must be lazy hence the `?` in the matched group `(.*?)`.
|
||||
//
|
||||
// The greedy `non-?` matching would lead to one single match
|
||||
// from the beginning of the first documentation tag, containing everything up to
|
||||
// the last documentation tag
|
||||
// `# [ doc = "msg"] # [ doc = "msg2"] ... api ... # [ doc = "msgN" ]`
|
||||
//
|
||||
// The `(.*?)` stands for match any character zero or more times lazily.
|
||||
let re = Regex::new(r#"\# \[doc = "(.*?)"\]"#).unwrap();
|
||||
re.captures_iter(&runtime_api)
|
||||
.filter_map(|capture| {
|
||||
// Get the matched group (ie index 1).
|
||||
capture.get(1).as_ref().map(|doc| {
|
||||
// Generated documentation will escape special characters.
|
||||
// Replace escaped characters with unescaped variants for
|
||||
// exact matching on the raw metadata documentation.
|
||||
doc.as_str()
|
||||
.replace("\\n", "\n")
|
||||
.replace("\\t", "\t")
|
||||
.replace("\\\"", "\"")
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// With doc generation enabled, every doc string in the metadata must appear
/// verbatim in the generated runtime API.
#[test]
fn check_documentation() {
    // All docs reachable from the metadata...
    let raw_docs = metadata_docs();
    // ...and all docs present in the generated interface.
    let runtime_docs = interface_docs(true);

    for raw in &raw_docs {
        assert!(
            runtime_docs.contains(raw),
            "Documentation not present in runtime API: {raw}"
        );
    }
}
|
||||
|
||||
/// With doc generation disabled, none of the metadata doc strings may leak
/// into the generated runtime API.
#[test]
fn check_no_documentation() {
    // All docs reachable from the metadata...
    let raw_docs = metadata_docs();
    // ...and all docs present in the generated interface (should be unrelated).
    let runtime_docs = interface_docs(false);

    for raw in &raw_docs {
        assert!(
            !runtime_docs.contains(raw),
            "Documentation should not be present in runtime API: {raw}"
        );
    }
}
|
||||
|
||||
/// Doc comments and attributes written on the user's root `pub mod api` must
/// be carried over, ahead of the module, in the generated output.
#[test]
fn check_root_attrs_preserved() {
    let metadata = load_test_metadata();

    // Test that the root docs/attr are preserved.
    let item_mod = syn::parse_quote!(
        /// Some root level documentation
        #[some_root_attribute]
        pub mod api {}
    );

    // Generate a runtime interface from the provided metadata.
    let generator = RuntimeGenerator::new(metadata);
    let derives = DerivesRegistry::with_default_derives(&CratePath::default());
    let type_substitutes = TypeSubstitutes::with_default_substitutes(&CratePath::default());
    let generated_code = generator
        .generate_runtime(
            item_mod,
            derives,
            type_substitutes,
            CratePath::default(),
            true,
        )
        .expect("API generation must be valid")
        .to_string();

    // Locate the doc string, the attribute and the module in the output text.
    let doc_str_loc = generated_code
        .find("Some root level documentation")
        .expect("root docs should be preserved");
    let attr_loc = generated_code
        .find("some_root_attribute") // '#' is space separated in generated output.
        .expect("root attr should be preserved");
    let mod_start = generated_code
        .find("pub mod api")
        .expect("'pub mod api' expected");

    // These things should be before the mod start
    assert!(doc_str_loc < mod_start);
    assert!(attr_loc < mod_start);
}
|
||||
@@ -0,0 +1,18 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
/// Checks that code generated by `subxt-cli codegen` compiles. Allows inspection of compiler errors
|
||||
/// directly, more accurately than via the macro and `cargo expand`.
|
||||
///
|
||||
/// Generate by running this at the root of the repository:
|
||||
///
|
||||
/// ```
|
||||
/// cargo run --bin subxt -- codegen --file artifacts/polkadot_metadata_full.scale | rustfmt > testing/integration-tests/src/codegen/polkadot.rs
|
||||
/// ```
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::all)]
|
||||
mod polkadot;
|
||||
|
||||
mod codegen_tests;
|
||||
mod documentation;
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,408 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use crate::{
|
||||
node_runtime::{self, balances, runtime_types, system},
|
||||
test_context,
|
||||
};
|
||||
use codec::Decode;
|
||||
use subxt::{
|
||||
error::{DispatchError, Error, TokenError},
|
||||
utils::{AccountId32, MultiAddress},
|
||||
};
|
||||
use subxt_signer::sr25519::dev;
|
||||
|
||||
#[tokio::test]
|
||||
async fn tx_basic_transfer() -> Result<(), subxt::Error> {
|
||||
let alice = dev::alice();
|
||||
let bob = dev::bob();
|
||||
let bob_address = bob.public_key().to_address();
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice_account_addr = node_runtime::storage()
|
||||
.system()
|
||||
.account(alice.public_key().to_account_id());
|
||||
let bob_account_addr = node_runtime::storage()
|
||||
.system()
|
||||
.account(bob.public_key().to_account_id());
|
||||
|
||||
let alice_pre = api
|
||||
.storage()
|
||||
.at_latest()
|
||||
.await?
|
||||
.fetch_or_default(&alice_account_addr)
|
||||
.await?;
|
||||
let bob_pre = api
|
||||
.storage()
|
||||
.at_latest()
|
||||
.await?
|
||||
.fetch_or_default(&bob_account_addr)
|
||||
.await?;
|
||||
|
||||
let tx = node_runtime::tx().balances().transfer(bob_address, 10_000);
|
||||
|
||||
let events = api
|
||||
.tx()
|
||||
.sign_and_submit_then_watch_default(&tx, &alice)
|
||||
.await?
|
||||
.wait_for_finalized_success()
|
||||
.await?;
|
||||
let event = events
|
||||
.find_first::<balances::events::Transfer>()
|
||||
.expect("Failed to decode balances::events::Transfer")
|
||||
.expect("Failed to find balances::events::Transfer");
|
||||
let _extrinsic_success = events
|
||||
.find_first::<system::events::ExtrinsicSuccess>()
|
||||
.expect("Failed to decode ExtrinisicSuccess")
|
||||
.expect("Failed to find ExtrinisicSuccess");
|
||||
|
||||
let expected_event = balances::events::Transfer {
|
||||
from: alice.public_key().to_account_id(),
|
||||
to: bob.public_key().to_account_id(),
|
||||
amount: 10_000,
|
||||
};
|
||||
assert_eq!(event, expected_event);
|
||||
|
||||
let alice_post = api
|
||||
.storage()
|
||||
.at_latest()
|
||||
.await?
|
||||
.fetch_or_default(&alice_account_addr)
|
||||
.await?;
|
||||
let bob_post = api
|
||||
.storage()
|
||||
.at_latest()
|
||||
.await?
|
||||
.fetch_or_default(&bob_account_addr)
|
||||
.await?;
|
||||
|
||||
assert!(alice_pre.data.free - 10_000 >= alice_post.data.free);
|
||||
assert_eq!(bob_pre.data.free + 10_000, bob_post.data.free);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Same transfer scenario as `tx_basic_transfer`, but driven entirely through
/// the dynamic (untyped) API: dynamic storage addresses, a dynamic call, and
/// event fields compared as `scale_value` composites.
#[tokio::test]
async fn tx_dynamic_transfer() -> Result<(), subxt::Error> {
    use subxt::ext::scale_value::{At, Composite, Value};

    let alice = dev::alice();
    let bob = dev::bob();
    let ctx = test_context().await;
    let api = ctx.client();

    // Dynamic addresses for the two `System::Account` entries.
    let alice_account_addr = subxt::dynamic::storage(
        "System",
        "Account",
        vec![Value::from_bytes(alice.public_key().to_account_id())],
    );
    let bob_account_addr = subxt::dynamic::storage(
        "System",
        "Account",
        vec![Value::from_bytes(bob.public_key().to_account_id())],
    );

    // Balances before the transfer.
    let alice_pre = api
        .storage()
        .at_latest()
        .await?
        .fetch_or_default(&alice_account_addr)
        .await?;
    let bob_pre = api
        .storage()
        .at_latest()
        .await?
        .fetch_or_default(&bob_account_addr)
        .await?;

    // Dynamic `Balances::transfer` call: destination is a `MultiAddress::Id`
    // variant wrapping Bob's account id, amount is 10_000.
    let tx = subxt::dynamic::tx(
        "Balances",
        "transfer",
        vec![
            Value::unnamed_variant(
                "Id",
                vec![Value::from_bytes(bob.public_key().to_account_id())],
            ),
            Value::u128(10_000u128),
        ],
    );

    let events = api
        .tx()
        .sign_and_submit_then_watch_default(&tx, &alice)
        .await?
        .wait_for_finalized_success()
        .await?;

    // Locate the Transfer event dynamically and drop the type-id context so it
    // can be compared against a hand-built composite below.
    let event_fields = events
        .iter()
        .filter_map(|ev| ev.ok())
        .find(|ev| ev.pallet_name() == "Balances" && ev.variant_name() == "Transfer")
        .expect("Failed to find Transfer event")
        .field_values()?
        .map_context(|_| ());

    let expected_fields = Composite::Named(vec![
        (
            "from".into(),
            Value::unnamed_composite(vec![Value::from_bytes(alice.public_key().to_account_id())]),
        ),
        (
            "to".into(),
            Value::unnamed_composite(vec![Value::from_bytes(bob.public_key().to_account_id())]),
        ),
        ("amount".into(), Value::u128(10_000)),
    ]);
    assert_eq!(event_fields, expected_fields);

    // Balances after the transfer.
    let alice_post = api
        .storage()
        .at_latest()
        .await?
        .fetch_or_default(&alice_account_addr)
        .await?;
    let bob_post = api
        .storage()
        .at_latest()
        .await?
        .fetch_or_default(&bob_account_addr)
        .await?;

    // Dig the `data.free` field out of each dynamically-decoded account value.
    let alice_pre_free = alice_pre
        .to_value()?
        .at("data")
        .at("free")
        .unwrap()
        .as_u128()
        .unwrap();
    let alice_post_free = alice_post
        .to_value()?
        .at("data")
        .at("free")
        .unwrap()
        .as_u128()
        .unwrap();

    let bob_pre_free = bob_pre
        .to_value()?
        .at("data")
        .at("free")
        .unwrap()
        .as_u128()
        .unwrap();
    let bob_post_free = bob_post
        .to_value()?
        .at("data")
        .at("free")
        .unwrap()
        .as_u128()
        .unwrap();

    // Alice also pays fees (hence `>=`); Bob gains exactly the amount.
    assert!(alice_pre_free - 10_000 >= alice_post_free);
    assert_eq!(bob_pre_free + 10_000, bob_post_free);

    Ok(())
}
|
||||
|
||||
/// Submitting the same transfer three times in a row must succeed — i.e. the
/// default signing path picks up an incremented nonce each time — and Bob's
/// balance grows by the combined amount.
#[tokio::test]
async fn multiple_transfers_work_nonce_incremented() -> Result<(), subxt::Error> {
    let alice = dev::alice();
    let bob = dev::bob();
    let bob_address: MultiAddress<AccountId32, u32> = bob.public_key().into();
    let ctx = test_context().await;
    let api = ctx.client();

    let bob_account_addr = node_runtime::storage()
        .system()
        .account(bob.public_key().to_account_id());

    // Bob's balance before the three transfers.
    let bob_pre = api
        .storage()
        .at_latest()
        .await?
        .fetch_or_default(&bob_account_addr)
        .await?;

    let tx = node_runtime::tx()
        .balances()
        .transfer(bob_address.clone(), 10_000);
    for _ in 0..3 {
        api.tx()
            .sign_and_submit_then_watch_default(&tx, &alice)
            .await?
            .wait_for_in_block() // Don't need to wait for finalization; this is quicker.
            .await?
            .wait_for_success()
            .await?;
    }

    let bob_post = api
        .storage()
        .at_latest()
        .await?
        .fetch_or_default(&bob_account_addr)
        .await?;

    // Three transfers of 10_000 each.
    assert_eq!(bob_pre.data.free + 30_000, bob_post.data.free);
    Ok(())
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn storage_total_issuance() {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let addr = node_runtime::storage().balances().total_issuance();
|
||||
let total_issuance = api
|
||||
.storage()
|
||||
.at_latest()
|
||||
.await
|
||||
.unwrap()
|
||||
.fetch_or_default(&addr)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_ne!(total_issuance, 0);
|
||||
}
|
||||
|
||||
/// Bonding funds via the staking pallet must create a matching `staking ` lock
/// in `Balances::Locks` for the bonding account.
#[tokio::test]
async fn storage_balance_lock() -> Result<(), subxt::Error> {
    let bob_signer = dev::bob();
    let bob: AccountId32 = dev::bob().public_key().into();
    let charlie: AccountId32 = dev::charlie().public_key().into();
    let ctx = test_context().await;
    let api = ctx.client();

    // Bond from Bob with Charlie as controller, rewards to the stash.
    let tx = node_runtime::tx().staking().bond(
        charlie.into(),
        100_000_000_000_000,
        runtime_types::pallet_staking::RewardDestination::Stash,
    );

    api.tx()
        .sign_and_submit_then_watch_default(&tx, &bob_signer)
        .await?
        .wait_for_finalized_success()
        .await?
        .find_first::<system::events::ExtrinsicSuccess>()?
        .expect("No ExtrinsicSuccess Event found");

    let locks_addr = node_runtime::storage().balances().locks(bob);

    let locks = api
        .storage()
        .at_latest()
        .await?
        .fetch_or_default(&locks_addr)
        .await?;

    // Exactly one lock, tagged with the staking pallet's 8-byte lock id.
    assert_eq!(
        locks.0,
        vec![runtime_types::pallet_balances::types::BalanceLock {
            id: *b"staking ",
            amount: 100_000_000_000_000,
            reasons: runtime_types::pallet_balances::types::Reasons::All,
        }]
    );

    Ok(())
}
|
||||
|
||||
/// Draining an account and then trying to send everything back must fail with
/// `TokenError::FundsUnavailable`, because the sender can no longer cover fees.
#[tokio::test]
async fn transfer_error() {
    let alice = dev::alice();
    let alice_addr = alice.public_key().into();
    let bob = dev::one(); // some dev account with no funds.
    let bob_address = bob.public_key().into();
    let ctx = test_context().await;
    let api = ctx.client();

    let to_bob_tx = node_runtime::tx()
        .balances()
        .transfer(bob_address, 100_000_000_000_000_000);
    let to_alice_tx = node_runtime::tx()
        .balances()
        .transfer(alice_addr, 100_000_000_000_000_000);

    // Fund the empty account first so it can attempt the reverse transfer.
    api.tx()
        .sign_and_submit_then_watch_default(&to_bob_tx, &alice)
        .await
        .unwrap()
        .wait_for_finalized_success()
        .await
        .unwrap();

    // When we try giving all of the funds back, Bob doesn't have
    // anything left to pay transfer fees, so we hit an error.
    let res = api
        .tx()
        .sign_and_submit_then_watch_default(&to_alice_tx, &bob)
        .await
        .unwrap()
        .wait_for_finalized_success()
        .await;

    assert!(
        matches!(
            res,
            Err(Error::Runtime(DispatchError::Token(
                TokenError::FundsUnavailable
            )))
        ),
        "Expected an insufficient balance, got {res:?}"
    );
}
|
||||
|
||||
/// Submit a transfer and pull the `Transfer` event straight out of the
/// finalized-success result, verifying sender, recipient and amount.
#[tokio::test]
async fn transfer_implicit_subscription() {
    let alice = dev::alice();
    let bob: AccountId32 = dev::bob().public_key().into();
    let ctx = test_context().await;
    let api = ctx.client();

    let to_bob_tx = node_runtime::tx()
        .balances()
        .transfer(bob.clone().into(), 10_000);

    // Submit, wait for finalization and decode the Transfer event in one chain.
    let event = api
        .tx()
        .sign_and_submit_then_watch_default(&to_bob_tx, &alice)
        .await
        .unwrap()
        .wait_for_finalized_success()
        .await
        .unwrap()
        .find_first::<balances::events::Transfer>()
        .expect("Can decode events")
        .expect("Can find balance transfer event");

    assert_eq!(
        event,
        balances::events::Transfer {
            from: alice.public_key().to_account_id(),
            to: bob,
            amount: 10_000
        }
    );
}
|
||||
|
||||
/// The `ExistentialDeposit` constant must decode to the same value whether
/// read manually from the raw pallet metadata or through the statically
/// generated constant address.
#[tokio::test]
async fn constant_existential_deposit() {
    let ctx = test_context().await;
    let api = ctx.client();

    // get and decode constant manually via metadata:
    let metadata = api.metadata();
    let balances_metadata = metadata.pallet_by_name("Balances").unwrap();
    let constant_metadata = balances_metadata
        .constant_by_name("ExistentialDeposit")
        .unwrap();
    let existential_deposit = u128::decode(&mut constant_metadata.value()).unwrap();
    assert_eq!(existential_deposit, 100_000_000_000_000);

    // constant address for API access:
    let addr = node_runtime::constants().balances().existential_deposit();

    // Make sure the two are identical:
    assert_eq!(existential_deposit, api.constants().at(&addr).unwrap());
}
|
||||
@@ -0,0 +1,233 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use crate::{
|
||||
node_runtime::{
|
||||
self,
|
||||
contracts::events,
|
||||
runtime_types::{pallet_contracts::wasm::Determinism, sp_weights::weight_v2::Weight},
|
||||
system,
|
||||
},
|
||||
test_context, TestContext,
|
||||
};
|
||||
use subxt::{tx::TxProgress, utils::MultiAddress, Config, Error, OnlineClient, SubstrateConfig};
|
||||
use subxt_signer::sr25519::{self, dev};
|
||||
|
||||
/// Harness for the contracts-pallet tests: pairs the context returned by
/// `test_context()` with a signing keypair used for all submissions.
struct ContractsTestContext {
    // Shared test context (provides the client in `client()`).
    cxt: TestContext,
    // Keypair used to sign every contract transaction (Alice; see `init`).
    signer: sr25519::Keypair,
}
|
||||
|
||||
// Convenience aliases for the hash/account types of the test chain config.
type Hash = <SubstrateConfig as Config>::Hash;
type AccountId = <SubstrateConfig as Config>::AccountId;

// Minimal valid contract in WAT: exports the two entry points (`call` and
// `deploy`) required by pallet-contracts, both no-ops.
const CONTRACT: &str = r#"
    (module
        (func (export "call"))
        (func (export "deploy"))
    )
"#;

// Half of u64::MAX — used as an effectively-unbounded proof size in the
// gas limits passed to contract calls below.
const PROOF_SIZE: u64 = u64::MAX / 2;
|
||||
|
||||
impl ContractsTestContext {
    /// Start a test node and pick Alice as the signer for all extrinsics.
    async fn init() -> Self {
        let cxt = test_context().await;
        let signer = dev::alice();

        Self { cxt, signer }
    }

    /// Online client connected to the test node.
    fn client(&self) -> OnlineClient<SubstrateConfig> {
        self.cxt.client()
    }

    /// Compile `CONTRACT` from WAT to wasm and upload it via
    /// `Contracts::upload_code`; returns the code hash taken from the
    /// `CodeStored` event.
    async fn upload_code(&self) -> Result<Hash, Error> {
        let code = wabt::wat2wasm(CONTRACT).expect("invalid wabt");

        let upload_tx =
            node_runtime::tx()
                .contracts()
                .upload_code(code, None, Determinism::Enforced);

        let events = self
            .client()
            .tx()
            .sign_and_submit_then_watch_default(&upload_tx, &self.signer)
            .await?
            .wait_for_finalized_success()
            .await?;

        // The hash is only available from the emitted event.
        let code_stored = events
            .find_first::<events::CodeStored>()?
            .ok_or_else(|| Error::Other("Failed to find a CodeStored event".into()))?;
        Ok(code_stored.code_hash)
    }

    /// Upload and instantiate the test contract in a single extrinsic
    /// (`Contracts::instantiate_with_code`); returns the code hash and the
    /// new contract's account id.
    async fn instantiate_with_code(&self) -> Result<(Hash, AccountId), Error> {
        tracing::info!("instantiate_with_code:");
        let code = wabt::wat2wasm(CONTRACT).expect("invalid wabt");

        let instantiate_tx = node_runtime::tx().contracts().instantiate_with_code(
            100_000_000_000_000_000, // endowment
            Weight {
                ref_time: 500_000_000_000,
                proof_size: PROOF_SIZE,
            }, // gas_limit
            None, // storage_deposit_limit
            code,
            vec![], // data
            vec![], // salt
        );

        let events = self
            .client()
            .tx()
            .sign_and_submit_then_watch_default(&instantiate_tx, &self.signer)
            .await?
            .wait_for_finalized_success()
            .await?;

        // All three events must be present for this to count as a success.
        let code_stored = events
            .find_first::<events::CodeStored>()?
            .ok_or_else(|| Error::Other("Failed to find a CodeStored event".into()))?;
        let instantiated = events
            .find_first::<events::Instantiated>()?
            .ok_or_else(|| Error::Other("Failed to find a Instantiated event".into()))?;
        let _extrinsic_success = events
            .find_first::<system::events::ExtrinsicSuccess>()?
            .ok_or_else(|| Error::Other("Failed to find a ExtrinsicSuccess event".into()))?;

        tracing::info!("  Block hash: {:?}", events.block_hash());
        tracing::info!("  Code hash: {:?}", code_stored.code_hash);
        tracing::info!("  Contract address: {:?}", instantiated.contract);
        Ok((code_stored.code_hash, instantiated.contract))
    }

    /// Instantiate a previously uploaded `code_hash` with the given
    /// constructor `data` and `salt`; returns the new contract's account id.
    async fn instantiate(
        &self,
        code_hash: Hash,
        data: Vec<u8>,
        salt: Vec<u8>,
    ) -> Result<AccountId, Error> {
        // call instantiate extrinsic
        let instantiate_tx = node_runtime::tx().contracts().instantiate(
            100_000_000_000_000_000, // endowment
            Weight {
                ref_time: 500_000_000_000,
                proof_size: PROOF_SIZE,
            }, // gas_limit
            None, // storage_deposit_limit
            code_hash,
            data,
            salt,
        );

        let result = self
            .client()
            .tx()
            .sign_and_submit_then_watch_default(&instantiate_tx, &self.signer)
            .await?
            .wait_for_finalized_success()
            .await?;

        tracing::info!("Instantiate result: {:?}", result);
        let instantiated = result
            .find_first::<events::Instantiated>()?
            .ok_or_else(|| Error::Other("Failed to find a Instantiated event".into()))?;

        Ok(instantiated.contract)
    }

    /// Submit a `Contracts::call` to `contract` with `input_data` and return
    /// the in-flight transaction progress (not yet finalized).
    async fn call(
        &self,
        contract: AccountId,
        input_data: Vec<u8>,
    ) -> Result<TxProgress<SubstrateConfig, OnlineClient<SubstrateConfig>>, Error> {
        tracing::info!("call: {:?}", contract);
        let call_tx = node_runtime::tx().contracts().call(
            MultiAddress::Id(contract),
            0, // value
            Weight {
                ref_time: 500_000_000,
                proof_size: PROOF_SIZE,
            }, // gas_limit
            None, // storage_deposit_limit
            input_data,
        );

        let result = self
            .client()
            .tx()
            .sign_and_submit_then_watch_default(&call_tx, &self.signer)
            .await?;

        tracing::info!("Call result: {:?}", result);
        Ok(result)
    }
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tx_instantiate_with_code() {
|
||||
let ctx = ContractsTestContext::init().await;
|
||||
let result = ctx.instantiate_with_code().await;
|
||||
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Error calling instantiate_with_code and receiving CodeStored and Instantiated Events: {result:?}"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tx_instantiate() {
|
||||
let ctx = ContractsTestContext::init().await;
|
||||
let code_hash = ctx.upload_code().await.unwrap();
|
||||
|
||||
let instantiated = ctx.instantiate(code_hash, vec![], vec![]).await;
|
||||
|
||||
assert!(
|
||||
instantiated.is_ok(),
|
||||
"Error instantiating contract: {instantiated:?}"
|
||||
);
|
||||
}
|
||||
|
||||
/// Instantiate a contract, sanity-check its storage entry, then submit a
/// plain `Contracts::call` against it.
#[tokio::test]
async fn tx_call() {
    let cxt = ContractsTestContext::init().await;
    let (_, contract) = cxt.instantiate_with_code().await.unwrap();

    // Typed storage address of the contract's ContractInfoOf entry.
    let info_addr = node_runtime::storage()
        .contracts()
        .contract_info_of(&contract);

    // Raw key bytes for the same address, used as a prefix for key iteration.
    let info_addr_bytes = cxt.client().storage().address_bytes(&info_addr).unwrap();

    // The instantiated contract should have a fetchable info entry.
    let contract_info = cxt
        .client()
        .storage()
        .at_latest()
        .await
        .unwrap()
        .fetch(&info_addr)
        .await;
    assert!(contract_info.is_ok());

    // List up to 10 keys under the prefix — diagnostic output only.
    let keys = cxt
        .client()
        .storage()
        .at_latest()
        .await
        .unwrap()
        .fetch_keys(&info_addr_bytes, 10, None)
        .await
        .unwrap()
        .iter()
        .map(|key| hex::encode(&key.0))
        .collect::<Vec<_>>();
    println!("keys post: {keys:?}");

    // Calling the (no-op) contract must succeed.
    let executed = cxt.call(contract, vec![]).await;

    assert!(executed.is_ok(), "Error calling contract: {executed:?}");
}
|
||||
@@ -0,0 +1,12 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
//! Test interactions with some built-in FRAME pallets.
|
||||
|
||||
mod balances;
|
||||
mod contracts;
|
||||
mod staking;
|
||||
mod sudo;
|
||||
mod system;
|
||||
mod timestamp;
|
||||
@@ -0,0 +1,265 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use crate::{
|
||||
node_runtime::{
|
||||
self,
|
||||
runtime_types::{
|
||||
pallet_staking::{RewardDestination, ValidatorPrefs},
|
||||
sp_arithmetic::per_things::Perbill,
|
||||
},
|
||||
staking,
|
||||
},
|
||||
test_context,
|
||||
};
|
||||
use assert_matches::assert_matches;
|
||||
use subxt::error::{DispatchError, Error};
|
||||
use subxt_signer::{
|
||||
sr25519::{self, dev},
|
||||
SecretUri,
|
||||
};
|
||||
|
||||
/// Helper function to generate a crypto pair from seed
|
||||
fn get_from_seed(seed: &str) -> sr25519::Keypair {
|
||||
use std::str::FromStr;
|
||||
let uri = SecretUri::from_str(&format!("//{seed}")).expect("expected to be valid");
|
||||
sr25519::Keypair::from_uri(&uri).expect("expected to be valid")
|
||||
}
|
||||
|
||||
fn default_validator_prefs() -> ValidatorPrefs {
|
||||
ValidatorPrefs {
|
||||
commission: Perbill(0),
|
||||
blocked: false,
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn validate_with_controller_account() {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice = dev::alice();
|
||||
|
||||
let tx = node_runtime::tx()
|
||||
.staking()
|
||||
.validate(default_validator_prefs());
|
||||
|
||||
api.tx()
|
||||
.sign_and_submit_then_watch_default(&tx, &alice)
|
||||
.await
|
||||
.unwrap()
|
||||
.wait_for_finalized_success()
|
||||
.await
|
||||
.expect("should be successful");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn validate_not_possible_for_stash_account() -> Result<(), Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice_stash = get_from_seed("Alice//stash");
|
||||
|
||||
let tx = node_runtime::tx()
|
||||
.staking()
|
||||
.validate(default_validator_prefs());
|
||||
|
||||
let announce_validator = api
|
||||
.tx()
|
||||
.sign_and_submit_then_watch_default(&tx, &alice_stash)
|
||||
.await?
|
||||
.wait_for_finalized_success()
|
||||
.await;
|
||||
assert_matches!(announce_validator, Err(Error::Runtime(DispatchError::Module(err))) => {
|
||||
let details = err.details().unwrap();
|
||||
assert_eq!(details.pallet.name(), "Staking");
|
||||
assert_eq!(&details.variant.name, "NotController");
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn nominate_with_controller_account() {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice = dev::alice();
|
||||
let bob = dev::bob();
|
||||
|
||||
let tx = node_runtime::tx()
|
||||
.staking()
|
||||
.nominate(vec![bob.public_key().to_address()]);
|
||||
|
||||
api.tx()
|
||||
.sign_and_submit_then_watch_default(&tx, &alice)
|
||||
.await
|
||||
.unwrap()
|
||||
.wait_for_finalized_success()
|
||||
.await
|
||||
.expect("should be successful");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn nominate_not_possible_for_stash_account() -> Result<(), Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice_stash = get_from_seed("Alice//stash");
|
||||
let bob = dev::bob();
|
||||
|
||||
let tx = node_runtime::tx()
|
||||
.staking()
|
||||
.nominate(vec![bob.public_key().to_address()]);
|
||||
|
||||
let nomination = api
|
||||
.tx()
|
||||
.sign_and_submit_then_watch_default(&tx, &alice_stash)
|
||||
.await
|
||||
.unwrap()
|
||||
.wait_for_finalized_success()
|
||||
.await;
|
||||
|
||||
assert_matches!(nomination, Err(Error::Runtime(DispatchError::Module(err))) => {
|
||||
let details = err.details().unwrap();
|
||||
assert_eq!(details.pallet.name(), "Staking");
|
||||
assert_eq!(&details.variant.name, "NotController");
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// `Staking::chill` is rejected for the stash account but succeeds for the
/// controller, emitting a `Chilled` event.
#[tokio::test]
async fn chill_works_for_controller_only() -> Result<(), Error> {
    let ctx = test_context().await;
    let api = ctx.client();

    let alice_stash = get_from_seed("Alice//stash");
    let bob_stash = get_from_seed("Bob//stash");
    let alice = dev::alice();

    // this will fail the second time, which is why this is one test, not two
    let nominate_tx = node_runtime::tx()
        .staking()
        .nominate(vec![bob_stash.public_key().to_address()]);
    api.tx()
        .sign_and_submit_then_watch_default(&nominate_tx, &alice)
        .await?
        .wait_for_finalized_success()
        .await?;

    // Sanity check: the ledger entry keyed by Alice (controller) points at
    // the Alice stash account.
    let ledger_addr = node_runtime::storage()
        .staking()
        .ledger(alice.public_key().to_account_id());
    let ledger = api
        .storage()
        .at_latest()
        .await?
        .fetch(&ledger_addr)
        .await?
        .unwrap();
    assert_eq!(alice_stash.public_key().to_account_id(), ledger.stash);

    let chill_tx = node_runtime::tx().staking().chill();

    // Chilling from the stash account must be rejected ...
    let chill = api
        .tx()
        .sign_and_submit_then_watch_default(&chill_tx, &alice_stash)
        .await?
        .wait_for_finalized_success()
        .await;

    assert_matches!(chill, Err(Error::Runtime(DispatchError::Module(err))) => {
        let details = err.details().unwrap();
        assert_eq!(details.pallet.name(), "Staking");
        assert_eq!(&details.variant.name, "NotController");
    });

    // ... while chilling from the controller succeeds and emits `Chilled`.
    let is_chilled = api
        .tx()
        .sign_and_submit_then_watch_default(&chill_tx, &alice)
        .await?
        .wait_for_finalized_success()
        .await?
        .has::<staking::events::Chilled>()?;
    assert!(is_chilled);

    Ok(())
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tx_bond() -> Result<(), Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice = dev::alice();
|
||||
|
||||
let bond_tx = node_runtime::tx().staking().bond(
|
||||
dev::bob().public_key().into(),
|
||||
100_000_000_000_000,
|
||||
RewardDestination::Stash,
|
||||
);
|
||||
|
||||
let bond = api
|
||||
.tx()
|
||||
.sign_and_submit_then_watch_default(&bond_tx, &alice)
|
||||
.await?
|
||||
.wait_for_finalized_success()
|
||||
.await;
|
||||
|
||||
assert!(bond.is_ok());
|
||||
|
||||
let bond_again = api
|
||||
.tx()
|
||||
.sign_and_submit_then_watch_default(&bond_tx, &alice)
|
||||
.await?
|
||||
.wait_for_finalized_success()
|
||||
.await;
|
||||
|
||||
assert_matches!(bond_again, Err(Error::Runtime(DispatchError::Module(err))) => {
|
||||
let details = err.details().unwrap();
|
||||
assert_eq!(details.pallet.name(), "Staking");
|
||||
assert_eq!(&details.variant.name, "AlreadyBonded");
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn storage_history_depth() -> Result<(), Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
let history_depth_addr = node_runtime::constants().staking().history_depth();
|
||||
let history_depth = api.constants().at(&history_depth_addr)?;
|
||||
assert_eq!(history_depth, 84);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn storage_current_era() -> Result<(), Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
let current_era_addr = node_runtime::storage().staking().current_era();
|
||||
let _current_era = api
|
||||
.storage()
|
||||
.at_latest()
|
||||
.await?
|
||||
.fetch(¤t_era_addr)
|
||||
.await?
|
||||
.expect("current era always exists");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn storage_era_reward_points() -> Result<(), Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
let reward_points_addr = node_runtime::storage().staking().eras_reward_points(0);
|
||||
let current_era_result = api
|
||||
.storage()
|
||||
.at_latest()
|
||||
.await?
|
||||
.fetch(&reward_points_addr)
|
||||
.await;
|
||||
assert!(current_era_result.is_ok());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -0,0 +1,74 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use crate::{
|
||||
node_runtime::{
|
||||
self,
|
||||
runtime_types::{self, sp_weights::weight_v2::Weight},
|
||||
sudo,
|
||||
},
|
||||
test_context,
|
||||
};
|
||||
use subxt_signer::sr25519::dev;
|
||||
|
||||
type Call = runtime_types::kitchensink_runtime::RuntimeCall;
|
||||
type BalancesCall = runtime_types::pallet_balances::pallet::Call;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_sudo() -> Result<(), subxt::Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice = dev::alice();
|
||||
let bob = dev::bob().public_key().into();
|
||||
|
||||
let call = Call::Balances(BalancesCall::transfer {
|
||||
dest: bob,
|
||||
value: 10_000,
|
||||
});
|
||||
let tx = node_runtime::tx().sudo().sudo(call);
|
||||
|
||||
let found_event = api
|
||||
.tx()
|
||||
.sign_and_submit_then_watch_default(&tx, &alice)
|
||||
.await?
|
||||
.wait_for_finalized_success()
|
||||
.await?
|
||||
.has::<sudo::events::Sudid>()?;
|
||||
|
||||
assert!(found_event);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_sudo_unchecked_weight() -> Result<(), subxt::Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice = dev::alice();
|
||||
let bob = dev::bob().public_key().into();
|
||||
|
||||
let call = Call::Balances(BalancesCall::transfer {
|
||||
dest: bob,
|
||||
value: 10_000,
|
||||
});
|
||||
let tx = node_runtime::tx().sudo().sudo_unchecked_weight(
|
||||
call,
|
||||
Weight {
|
||||
ref_time: 0,
|
||||
proof_size: 0,
|
||||
},
|
||||
);
|
||||
|
||||
let found_event = api
|
||||
.tx()
|
||||
.sign_and_submit_then_watch_default(&tx, &alice)
|
||||
.await?
|
||||
.wait_for_finalized_success()
|
||||
.await?
|
||||
.has::<sudo::events::Sudid>()?;
|
||||
|
||||
assert!(found_event);
|
||||
Ok(())
|
||||
}
|
||||
@@ -0,0 +1,55 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use crate::{
|
||||
node_runtime::{self, system},
|
||||
test_context,
|
||||
};
|
||||
use assert_matches::assert_matches;
|
||||
use subxt_signer::sr25519::dev;
|
||||
|
||||
#[tokio::test]
|
||||
async fn storage_account() -> Result<(), subxt::Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice = dev::alice();
|
||||
|
||||
let account_info_addr = node_runtime::storage()
|
||||
.system()
|
||||
.account(alice.public_key().to_account_id());
|
||||
|
||||
let account_info = api
|
||||
.storage()
|
||||
.at_latest()
|
||||
.await?
|
||||
.fetch_or_default(&account_info_addr)
|
||||
.await;
|
||||
|
||||
assert_matches!(account_info, Ok(_));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tx_remark_with_event() -> Result<(), subxt::Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice = dev::alice();
|
||||
|
||||
let tx = node_runtime::tx()
|
||||
.system()
|
||||
.remark_with_event(b"remarkable".to_vec());
|
||||
|
||||
let found_event = api
|
||||
.tx()
|
||||
.sign_and_submit_then_watch_default(&tx, &alice)
|
||||
.await?
|
||||
.wait_for_finalized_success()
|
||||
.await?
|
||||
.has::<system::events::Remarked>()?;
|
||||
|
||||
assert!(found_event);
|
||||
Ok(())
|
||||
}
|
||||
@@ -0,0 +1,21 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use crate::{node_runtime, test_context};
|
||||
|
||||
#[tokio::test]
|
||||
async fn storage_get_current_timestamp() {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let timestamp = api
|
||||
.storage()
|
||||
.at_latest()
|
||||
.await
|
||||
.unwrap()
|
||||
.fetch(&node_runtime::storage().timestamp().now())
|
||||
.await;
|
||||
|
||||
assert!(timestamp.is_ok())
|
||||
}
|
||||
@@ -0,0 +1,5 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
mod validation;
|
||||
@@ -0,0 +1,299 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use crate::{node_runtime, test_context, TestContext};
|
||||
use frame_metadata::v15::{
|
||||
ExtrinsicMetadata, PalletCallMetadata, PalletMetadata, PalletStorageMetadata,
|
||||
RuntimeMetadataV15, StorageEntryMetadata, StorageEntryModifier, StorageEntryType,
|
||||
};
|
||||
use scale_info::{
|
||||
build::{Fields, Variants},
|
||||
meta_type, Path, Type, TypeInfo,
|
||||
};
|
||||
use subxt::{Metadata, OfflineClient, SubstrateConfig};
|
||||
|
||||
async fn metadata_to_api(metadata: Metadata, ctx: &TestContext) -> OfflineClient<SubstrateConfig> {
|
||||
OfflineClient::new(
|
||||
ctx.client().genesis_hash(),
|
||||
ctx.client().runtime_version(),
|
||||
metadata,
|
||||
)
|
||||
}
|
||||
|
||||
fn v15_to_metadata(v15: RuntimeMetadataV15) -> Metadata {
|
||||
let subxt_md: subxt_metadata::Metadata = v15.try_into().unwrap();
|
||||
subxt_md.into()
|
||||
}
|
||||
|
||||
fn modified_metadata<F>(metadata: Metadata, f: F) -> Metadata
|
||||
where
|
||||
F: FnOnce(&mut RuntimeMetadataV15),
|
||||
{
|
||||
let mut metadata = RuntimeMetadataV15::from((*metadata).clone());
|
||||
f(&mut metadata);
|
||||
v15_to_metadata(metadata)
|
||||
}
|
||||
|
||||
fn default_pallet() -> PalletMetadata {
|
||||
PalletMetadata {
|
||||
name: "Test",
|
||||
storage: None,
|
||||
calls: None,
|
||||
event: None,
|
||||
constants: vec![],
|
||||
error: None,
|
||||
index: 0,
|
||||
docs: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrap `pallets` in a minimal-but-valid `RuntimeMetadataV15` and convert it
/// into subxt `Metadata`.
fn pallets_to_metadata(pallets: Vec<PalletMetadata>) -> Metadata {
    // Extrinsic needs to contain at least the generic type parameter "Call"
    // for the metadata to be valid.
    // The "Call" type from the metadata is used to decode extrinsics.
    // In reality, the extrinsic type has "Call", "Address", "Extra", "Signature" generic types.
    #[allow(unused)]
    #[derive(TypeInfo)]
    struct ExtrinsicType<Call> {
        call: Call,
    }
    // Because this type is used to decode extrinsics, we expect this to be a TypeDefVariant.
    // Each pallet must contain one single variant.
    #[allow(unused)]
    #[derive(TypeInfo)]
    enum RuntimeCall {
        PalletName(Pallet),
    }
    // The calls of the pallet.
    #[allow(unused)]
    #[derive(TypeInfo)]
    enum Pallet {
        #[allow(unused)]
        SomeCall,
    }

    v15_to_metadata(RuntimeMetadataV15::new(
        pallets,
        ExtrinsicMetadata {
            ty: meta_type::<ExtrinsicType<RuntimeCall>>(),
            version: 0,
            signed_extensions: vec![],
        },
        // Placeholder runtime type; not exercised by these tests.
        meta_type::<()>(),
        // No runtime API metadata is needed.
        vec![],
    ))
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn full_metadata_check() {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
// Runtime metadata is identical to the metadata used during API generation.
|
||||
assert!(node_runtime::is_codegen_valid_for(&api.metadata()));
|
||||
|
||||
// Modify the metadata.
|
||||
let metadata = modified_metadata(api.metadata(), |md| {
|
||||
md.pallets[0].name = "NewPallet".to_string();
|
||||
});
|
||||
|
||||
// It should now be invalid:
|
||||
assert!(!node_runtime::is_codegen_valid_for(&metadata));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn constant_values_are_not_validated() {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let deposit_addr = node_runtime::constants().balances().existential_deposit();
|
||||
|
||||
// Retrieve existential deposit to validate it and confirm that it's OK.
|
||||
assert!(api.constants().at(&deposit_addr).is_ok());
|
||||
|
||||
// Modify the metadata.
|
||||
let metadata = modified_metadata(api.metadata(), |md| {
|
||||
let mut existential = md
|
||||
.pallets
|
||||
.iter_mut()
|
||||
.find(|pallet| pallet.name == "Balances")
|
||||
.expect("Metadata must contain Balances pallet")
|
||||
.constants
|
||||
.iter_mut()
|
||||
.find(|constant| constant.name == "ExistentialDeposit")
|
||||
.expect("ExistentialDeposit constant must be present");
|
||||
|
||||
// Modifying a constant value should not lead to an error:
|
||||
existential.value = vec![0u8; 32];
|
||||
});
|
||||
|
||||
let api = metadata_to_api(metadata, &ctx).await;
|
||||
|
||||
assert!(node_runtime::is_codegen_valid_for(&api.metadata()));
|
||||
assert!(api.constants().at(&deposit_addr).is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn calls_check() {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let unbond_tx = node_runtime::tx().staking().unbond(123_456_789_012_345);
|
||||
let withdraw_unbonded_addr = node_runtime::tx().staking().withdraw_unbonded(10);
|
||||
|
||||
// Ensure that `Unbond` and `WinthdrawUnbonded` calls are compatible before altering the metadata.
|
||||
assert!(api.tx().validate(&unbond_tx).is_ok());
|
||||
assert!(api.tx().validate(&withdraw_unbonded_addr).is_ok());
|
||||
|
||||
// Reconstruct the `Staking` call as is.
|
||||
struct CallRec;
|
||||
impl TypeInfo for CallRec {
|
||||
type Identity = Self;
|
||||
fn type_info() -> Type {
|
||||
Type::builder()
|
||||
.path(Path::new("Call", "pallet_staking::pallet::pallet"))
|
||||
.variant(
|
||||
Variants::new()
|
||||
.variant("unbond", |v| {
|
||||
v.index(0).fields(Fields::named().field(|f| {
|
||||
f.compact::<u128>().name("value").type_name("BalanceOf<T>")
|
||||
}))
|
||||
})
|
||||
.variant("withdraw_unbonded", |v| {
|
||||
v.index(1).fields(Fields::named().field(|f| {
|
||||
f.ty::<u32>().name("num_slashing_spans").type_name("u32")
|
||||
}))
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
let pallet = PalletMetadata {
|
||||
name: "Staking",
|
||||
calls: Some(PalletCallMetadata {
|
||||
ty: meta_type::<CallRec>(),
|
||||
}),
|
||||
..default_pallet()
|
||||
};
|
||||
let metadata = pallets_to_metadata(vec![pallet]);
|
||||
let api = metadata_to_api(metadata, &ctx).await;
|
||||
|
||||
// The calls should still be valid with this new type info:
|
||||
assert!(api.tx().validate(&unbond_tx).is_ok());
|
||||
assert!(api.tx().validate(&withdraw_unbonded_addr).is_ok());
|
||||
|
||||
// Change `Unbond` call but leave the rest as is.
|
||||
struct CallRecSecond;
|
||||
impl TypeInfo for CallRecSecond {
|
||||
type Identity = Self;
|
||||
fn type_info() -> Type {
|
||||
Type::builder()
|
||||
.path(Path::new("Call", "pallet_staking::pallet::pallet"))
|
||||
.variant(
|
||||
Variants::new()
|
||||
.variant("unbond", |v| {
|
||||
v.index(0).fields(Fields::named().field(|f| {
|
||||
// Is of type u32 instead of u128.
|
||||
f.compact::<u32>().name("value").type_name("BalanceOf<T>")
|
||||
}))
|
||||
})
|
||||
.variant("withdraw_unbonded", |v| {
|
||||
v.index(1).fields(Fields::named().field(|f| {
|
||||
f.ty::<u32>().name("num_slashing_spans").type_name("u32")
|
||||
}))
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
let pallet = PalletMetadata {
|
||||
name: "Staking",
|
||||
calls: Some(PalletCallMetadata {
|
||||
ty: meta_type::<CallRecSecond>(),
|
||||
}),
|
||||
..default_pallet()
|
||||
};
|
||||
let metadata = pallets_to_metadata(vec![pallet]);
|
||||
let api = metadata_to_api(metadata, &ctx).await;
|
||||
|
||||
// Unbond call should fail, while withdraw_unbonded remains compatible.
|
||||
assert!(api.tx().validate(&unbond_tx).is_err());
|
||||
assert!(api.tx().validate(&withdraw_unbonded_addr).is_ok());
|
||||
}
|
||||
|
||||
/// Storage-address validation is structural: re-declaring the `System`
/// storage entries with the same types keeps the addresses valid, while
/// changing one entry's value type invalidates only that address.
#[tokio::test]
async fn storage_check() {
    let ctx = test_context().await;
    let api = ctx.client();

    let tx_count_addr = node_runtime::storage().system().extrinsic_count();
    let tx_len_addr = node_runtime::storage().system().all_extrinsics_len();

    // Ensure that `ExtrinsicCount` and `EventCount` storages are compatible before altering the metadata.
    assert!(api.storage().validate(&tx_count_addr).is_ok());
    assert!(api.storage().validate(&tx_len_addr).is_ok());

    // Reconstruct the storage.
    let storage = PalletStorageMetadata {
        prefix: "System",
        entries: vec![
            StorageEntryMetadata {
                name: "ExtrinsicCount",
                modifier: StorageEntryModifier::Optional,
                ty: StorageEntryType::Plain(meta_type::<u32>()),
                default: vec![0],
                docs: vec![],
            },
            StorageEntryMetadata {
                name: "AllExtrinsicsLen",
                modifier: StorageEntryModifier::Optional,
                ty: StorageEntryType::Plain(meta_type::<u32>()),
                default: vec![0],
                docs: vec![],
            },
        ],
    };
    let pallet = PalletMetadata {
        name: "System",
        storage: Some(storage),
        ..default_pallet()
    };
    let metadata = pallets_to_metadata(vec![pallet]);
    let api = metadata_to_api(metadata, &ctx).await;

    // The addresses should still validate:
    assert!(api.storage().validate(&tx_count_addr).is_ok());
    assert!(api.storage().validate(&tx_len_addr).is_ok());

    // Reconstruct the storage while modifying ExtrinsicCount.
    let storage = PalletStorageMetadata {
        prefix: "System",
        entries: vec![
            StorageEntryMetadata {
                name: "ExtrinsicCount",
                modifier: StorageEntryModifier::Optional,
                // Previously was u32.
                ty: StorageEntryType::Plain(meta_type::<u8>()),
                default: vec![0],
                docs: vec![],
            },
            StorageEntryMetadata {
                name: "AllExtrinsicsLen",
                modifier: StorageEntryModifier::Optional,
                ty: StorageEntryType::Plain(meta_type::<u32>()),
                default: vec![0],
                docs: vec![],
            },
        ],
    };
    let pallet = PalletMetadata {
        name: "System",
        storage: Some(storage),
        ..default_pallet()
    };
    let metadata = pallets_to_metadata(vec![pallet]);
    let api = metadata_to_api(metadata, &ctx).await;

    // The count route should fail now; the other will be ok still.
    assert!(api.storage().validate(&tx_count_addr).is_err());
    assert!(api.storage().validate(&tx_len_addr).is_ok());
}
|
||||
@@ -0,0 +1,16 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
#[cfg(test)]
|
||||
mod blocks;
|
||||
#[cfg(test)]
|
||||
mod client;
|
||||
#[cfg(test)]
|
||||
mod frame;
|
||||
#[cfg(test)]
|
||||
mod metadata;
|
||||
#[cfg(test)]
|
||||
mod runtime_api;
|
||||
#[cfg(test)]
|
||||
mod storage;
|
||||
@@ -0,0 +1,49 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use crate::{node_runtime, test_context};
|
||||
use subxt::utils::AccountId32;
|
||||
use subxt_signer::sr25519::dev;
|
||||
|
||||
#[tokio::test]
|
||||
async fn account_nonce() -> Result<(), subxt::Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let alice = dev::alice();
|
||||
let alice_account_id: AccountId32 = alice.public_key().into();
|
||||
|
||||
// Check Alice nonce is starting from 0.
|
||||
let runtime_api_call = node_runtime::apis()
|
||||
.account_nonce_api()
|
||||
.account_nonce(alice_account_id.clone());
|
||||
let nonce = api
|
||||
.runtime_api()
|
||||
.at_latest()
|
||||
.await?
|
||||
.call(runtime_api_call)
|
||||
.await?;
|
||||
assert_eq!(nonce, 0);
|
||||
|
||||
// Do some transaction to bump the Alice nonce to 1:
|
||||
let remark_tx = node_runtime::tx().system().remark(vec![1, 2, 3, 4, 5]);
|
||||
api.tx()
|
||||
.sign_and_submit_then_watch_default(&remark_tx, &alice)
|
||||
.await?
|
||||
.wait_for_finalized_success()
|
||||
.await?;
|
||||
|
||||
let runtime_api_call = node_runtime::apis()
|
||||
.account_nonce_api()
|
||||
.account_nonce(alice_account_id);
|
||||
let nonce = api
|
||||
.runtime_api()
|
||||
.at_latest()
|
||||
.await?
|
||||
.call(runtime_api_call)
|
||||
.await?;
|
||||
assert_eq!(nonce, 1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -0,0 +1,127 @@
|
||||
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use crate::{node_runtime, test_context, utils::wait_for_blocks};
|
||||
use subxt::utils::AccountId32;
|
||||
use subxt_signer::sr25519::dev;
|
||||
|
||||
#[tokio::test]
|
||||
async fn storage_plain_lookup() -> Result<(), subxt::Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
// Look up a plain value. Wait long enough that we don't get the genesis block data,
|
||||
// because it may have no storage associated with it.
|
||||
wait_for_blocks(&api).await;
|
||||
|
||||
let addr = node_runtime::storage().timestamp().now();
|
||||
let entry = api
|
||||
.storage()
|
||||
.at_latest()
|
||||
.await?
|
||||
.fetch_or_default(&addr)
|
||||
.await?;
|
||||
assert!(entry > 0);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn storage_map_lookup() -> Result<(), subxt::Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
let signer = dev::alice();
|
||||
let alice: AccountId32 = dev::alice().public_key().into();
|
||||
|
||||
// Do some transaction to bump the Alice nonce to 1:
|
||||
let remark_tx = node_runtime::tx().system().remark(vec![1, 2, 3, 4, 5]);
|
||||
api.tx()
|
||||
.sign_and_submit_then_watch_default(&remark_tx, &signer)
|
||||
.await?
|
||||
.wait_for_finalized_success()
|
||||
.await?;
|
||||
|
||||
// Look up the nonce for the user (we expect it to be 1).
|
||||
let nonce_addr = node_runtime::storage().system().account(alice);
|
||||
let entry = api
|
||||
.storage()
|
||||
.at_latest()
|
||||
.await?
|
||||
.fetch_or_default(&nonce_addr)
|
||||
.await?;
|
||||
assert_eq!(entry.nonce, 1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// This fails until the fix in https://github.com/paritytech/subxt/pull/458 is introduced.
|
||||
// Here we create a key that looks a bit like a StorageNMap key, but should in fact be
|
||||
// treated as a StorageKey (ie we should hash both values together with one hasher, rather
|
||||
// than hash both values separately, or ignore the second value).
|
||||
#[tokio::test]
|
||||
async fn storage_n_mapish_key_is_properly_created() -> Result<(), subxt::Error> {
|
||||
use codec::Encode;
|
||||
use node_runtime::runtime_types::sp_core::crypto::KeyTypeId;
|
||||
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
// This is what the generated code hashes a `session().key_owner(..)` key into:
|
||||
let actual_key = node_runtime::storage()
|
||||
.session()
|
||||
.key_owner(KeyTypeId([1, 2, 3, 4]), [5u8, 6, 7, 8]);
|
||||
let actual_key_bytes = api.storage().address_bytes(&actual_key)?;
|
||||
|
||||
// Let's manually hash to what we assume it should be and compare:
|
||||
let expected_key_bytes = {
|
||||
// Hash the prefix to the storage entry:
|
||||
let mut bytes = sp_core::twox_128("Session".as_bytes()).to_vec();
|
||||
bytes.extend(&sp_core::twox_128("KeyOwner".as_bytes())[..]);
|
||||
// twox64_concat a *tuple* of the args expected:
|
||||
let suffix = (KeyTypeId([1, 2, 3, 4]), vec![5u8, 6, 7, 8]).encode();
|
||||
bytes.extend(sp_core::twox_64(&suffix));
|
||||
bytes.extend(&suffix);
|
||||
bytes
|
||||
};
|
||||
|
||||
assert_eq!(actual_key_bytes, expected_key_bytes);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn storage_n_map_storage_lookup() -> Result<(), subxt::Error> {
|
||||
let ctx = test_context().await;
|
||||
let api = ctx.client();
|
||||
|
||||
// Boilerplate; we create a new asset class with ID 99, and then
|
||||
// we "approveTransfer" of some of this asset class. This gives us an
|
||||
// entry in the `Approvals` StorageNMap that we can try to look up.
|
||||
let signer = dev::alice();
|
||||
let alice: AccountId32 = dev::alice().public_key().into();
|
||||
let bob: AccountId32 = dev::bob().public_key().into();
|
||||
|
||||
let tx1 = node_runtime::tx()
|
||||
.assets()
|
||||
.create(99, alice.clone().into(), 1);
|
||||
let tx2 = node_runtime::tx()
|
||||
.assets()
|
||||
.approve_transfer(99, bob.clone().into(), 123);
|
||||
api.tx()
|
||||
.sign_and_submit_then_watch_default(&tx1, &signer)
|
||||
.await?
|
||||
.wait_for_finalized_success()
|
||||
.await?;
|
||||
api.tx()
|
||||
.sign_and_submit_then_watch_default(&tx2, &signer)
|
||||
.await?
|
||||
.wait_for_finalized_success()
|
||||
.await?;
|
||||
|
||||
// The actual test; look up this approval in storage:
|
||||
let addr = node_runtime::storage().assets().approvals(99, alice, bob);
|
||||
let entry = api.storage().at_latest().await?.fetch(&addr).await?;
|
||||
assert_eq!(entry.map(|a| a.amount), Some(123));
|
||||
Ok(())
|
||||
}
|
||||
Reference in New Issue
Block a user