Merge remote-tracking branch 'origin/master' into lexnv/update-smoldot

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>
This commit is contained in:
Alexandru Vasile
2024-04-03 19:05:40 +03:00
190 changed files with 27843 additions and 18050 deletions
+1 -1
View File
@@ -34,7 +34,7 @@ scale-info = { workspace = true, features = ["bit-vec"] }
sp-core = { workspace = true }
syn = { workspace = true }
subxt = { workspace = true, features = ["unstable-metadata", "native", "jsonrpsee", "substrate-compat"] }
subxt-signer = { workspace = true }
subxt-signer = { workspace = true, features = ["default"] }
subxt-codegen = { workspace = true }
subxt-metadata = { workspace = true }
test-runtime = { workspace = true }
@@ -14,7 +14,9 @@ use subxt::{
FollowEvent, Initialized, MethodResponse, RuntimeEvent, RuntimeVersionEvent, StorageQuery,
StorageQueryType,
},
utils::AccountId32,
config::Hasher,
utils::{AccountId32, MultiAddress},
SubstrateConfig,
};
use subxt_signer::sr25519::dev;
@@ -32,7 +34,7 @@ async fn chainhead_unstable_follow() {
assert_eq!(
event,
FollowEvent::Initialized(Initialized {
finalized_block_hash,
finalized_block_hashes: vec![finalized_block_hash],
finalized_block_runtime: None,
})
);
@@ -47,10 +49,10 @@ async fn chainhead_unstable_follow() {
assert_matches!(
event,
FollowEvent::Initialized(init) => {
assert_eq!(init.finalized_block_hash, finalized_block_hash);
assert_eq!(init.finalized_block_hashes, vec![finalized_block_hash]);
if let Some(RuntimeEvent::Valid(RuntimeVersionEvent { spec })) = init.finalized_block_runtime {
assert_eq!(spec.spec_version, runtime_version.spec_version);
assert_eq!(spec.transaction_version, runtime_version.transaction_version);
assert_eq!(spec.spec_version, runtime_version.spec_version());
assert_eq!(spec.transaction_version, runtime_version.transaction_version());
} else {
panic!("runtime details not provided with init event, got {:?}", init.finalized_block_runtime);
}
@@ -66,7 +68,7 @@ async fn chainhead_unstable_body() {
let mut blocks = rpc.chainhead_unstable_follow(false).await.unwrap();
let event = blocks.next().await.unwrap().unwrap();
let hash = match event {
FollowEvent::Initialized(init) => init.finalized_block_hash,
FollowEvent::Initialized(init) => init.finalized_block_hashes.last().unwrap().clone(),
_ => panic!("Unexpected event"),
};
let sub_id = blocks.subscription_id().unwrap();
@@ -95,7 +97,7 @@ async fn chainhead_unstable_header() {
let mut blocks = rpc.chainhead_unstable_follow(false).await.unwrap();
let event = blocks.next().await.unwrap().unwrap();
let hash = match event {
FollowEvent::Initialized(init) => init.finalized_block_hash,
FollowEvent::Initialized(init) => init.finalized_block_hashes.last().unwrap().clone(),
_ => panic!("Unexpected event"),
};
let sub_id = blocks.subscription_id().unwrap();
@@ -123,7 +125,7 @@ async fn chainhead_unstable_storage() {
let mut blocks = rpc.chainhead_unstable_follow(false).await.unwrap();
let event = blocks.next().await.unwrap().unwrap();
let hash = match event {
FollowEvent::Initialized(init) => init.finalized_block_hash,
FollowEvent::Initialized(init) => init.finalized_block_hashes.last().unwrap().clone(),
_ => panic!("Unexpected event"),
};
let sub_id = blocks.subscription_id().unwrap();
@@ -168,7 +170,7 @@ async fn chainhead_unstable_call() {
let mut blocks = rpc.chainhead_unstable_follow(true).await.unwrap();
let event = blocks.next().await.unwrap().unwrap();
let hash = match event {
FollowEvent::Initialized(init) => init.finalized_block_hash,
FollowEvent::Initialized(init) => init.finalized_block_hashes.last().unwrap().clone(),
_ => panic!("Unexpected event"),
};
let sub_id = blocks.subscription_id().unwrap();
@@ -205,7 +207,7 @@ async fn chainhead_unstable_unpin() {
let mut blocks = rpc.chainhead_unstable_follow(true).await.unwrap();
let event = blocks.next().await.unwrap().unwrap();
let hash = match event {
FollowEvent::Initialized(init) => init.finalized_block_hash,
FollowEvent::Initialized(init) => init.finalized_block_hashes.last().unwrap().clone(),
_ => panic!("Unexpected event"),
};
let sub_id = blocks.subscription_id().unwrap();
@@ -261,7 +263,7 @@ async fn transaction_unstable_submit_and_watch() {
let tx_bytes = ctx
.client()
.tx()
.create_signed_with_nonce(&payload, &dev::alice(), 0, Default::default())
.create_signed_offline(&payload, &dev::alice(), Default::default())
.unwrap()
.into_encoded();
@@ -309,3 +311,108 @@ async fn next_operation_event<
panic!("Cannot find operation related event after {NUM_EVENTS} produced events");
}
/// Integration test: submit a transaction via the unstable
/// `transactionWatch`-family RPC `transaction_unstable_broadcast` and assert
/// that it eventually appears in a finalized block with the expected call data.
#[tokio::test]
async fn transaction_unstable_broadcast() {
    let bob = dev::bob();
    let bob_address: MultiAddress<AccountId32, u32> = bob.public_key().into();

    let ctx = test_context().await;
    let api = ctx.client();
    let rpc = ctx.unstable_rpc_methods().await;

    // Balance transfer Alice -> Bob; the amount (10_001) is asserted on again
    // once the extrinsic is located in a finalized block.
    let tx = node_runtime::tx()
        .balances()
        .transfer_allow_death(bob_address.clone(), 10_001);

    let tx_bytes = ctx
        .client()
        .tx()
        .create_signed_offline(&tx, &dev::alice(), Default::default())
        .unwrap()
        .into_encoded();

    // Hash the encoded extrinsic, skipping the first two bytes.
    // NOTE(review): the `[2..]` offset presumably strips the compact length
    // prefix of the encoded extrinsic so the hash matches the block-side
    // `ext.bytes()` below — confirm against `into_encoded()`'s format.
    let tx_hash = <SubstrateConfig as subxt::Config>::Hasher::hash(&tx_bytes[2..]);

    // Subscribe to finalized blocks.
    let mut finalized_sub = api.blocks().subscribe_finalized().await.unwrap();
    // Expect the tx to be encountered in a maximum number of blocks.
    let mut num_blocks: usize = 10;

    // Submit the transaction.
    let _operation_id = rpc
        .transaction_unstable_broadcast(&tx_bytes)
        .await
        .unwrap()
        .expect("Server is not overloaded by 1 tx; qed");

    // Scan finalized blocks until the extrinsic shows up or the block budget
    // is exhausted (in which case the test panics).
    while let Some(finalized) = finalized_sub.next().await {
        let finalized = finalized.unwrap();

        // Started with positive, should not overflow.
        num_blocks = num_blocks.saturating_sub(1);
        if num_blocks == 0 {
            panic!("Did not find the tx in due time");
        }

        let extrinsics = finalized.extrinsics().await.unwrap();
        let block_extrinsics = extrinsics
            .iter()
            .map(|res| res.unwrap())
            .collect::<Vec<_>>();

        // Locate our transaction by re-hashing each extrinsic's bytes and
        // comparing against the hash computed at submission time.
        let Some(ext) = block_extrinsics
            .iter()
            .find(|ext| <SubstrateConfig as subxt::Config>::Hasher::hash(ext.bytes()) == tx_hash)
        else {
            continue;
        };

        // Decode as the expected call type and verify the transferred amount.
        let ext = ext
            .as_extrinsic::<node_runtime::balances::calls::types::TransferAllowDeath>()
            .unwrap()
            .unwrap();
        assert_eq!(ext.value, 10_001);
        return;
    }
}
/// Integration test: `transaction_unstable_stop` must (1) error for an unknown
/// operation id, (2) succeed once for a live broadcast, and (3) error when
/// invoked a second time on the same (already stopped) operation.
#[tokio::test]
async fn transaction_unstable_stop() {
    let bob = dev::bob();
    let bob_address: MultiAddress<AccountId32, u32> = bob.public_key().into();

    let ctx = test_context().await;
    let rpc = ctx.unstable_rpc_methods().await;

    // Cannot stop an operation that was not started.
    let _err = rpc
        .transaction_unstable_stop("non-existent-operation-id")
        .await
        .unwrap_err();

    // Submit a transaction and stop it.
    let tx = node_runtime::tx()
        .balances()
        .transfer_allow_death(bob_address.clone(), 10_001);
    let tx_bytes = ctx
        .client()
        .tx()
        .create_signed_offline(&tx, &dev::alice(), Default::default())
        .unwrap()
        .into_encoded();

    // Submit the transaction.
    let operation_id = rpc
        .transaction_unstable_broadcast(&tx_bytes)
        .await
        .unwrap()
        .expect("Server is not overloaded by 1 tx; qed");

    // First stop call succeeds while the broadcast operation is live.
    let _ = rpc.transaction_unstable_stop(&operation_id).await.unwrap();

    // Cannot stop it twice.
    let _err = rpc
        .transaction_unstable_stop(&operation_id)
        .await
        .unwrap_err();
}
@@ -1,214 +0,0 @@
// Copyright 2019-2023 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
use frame_metadata::{
v15::{ExtrinsicMetadata, RuntimeMetadataV15},
RuntimeMetadataPrefixed,
};
use scale_info::{meta_type, IntoPortable, PortableRegistry, Registry, TypeInfo};
use subxt_codegen::CodegenBuilder;
use syn::__private::quote;
/// Run subxt codegen over the given prefixed metadata and return the generated
/// runtime interface as a string (docs suppressed to keep output comparable).
fn generate_runtime_interface_from_metadata(metadata: RuntimeMetadataPrefixed) -> String {
    // Convert the frame-metadata representation into subxt's own `Metadata`.
    let converted = metadata
        .try_into()
        .expect("frame_metadata should be convertible into Metadata");

    // Generate the API without doc comments, then stringify the token stream.
    let generated = CodegenBuilder::new()
        .no_docs()
        .generate(converted)
        .expect("API generation must be valid");

    generated.to_string()
}
/// Build a minimal V15 metadata whose type registry is seeded with the types
/// codegen requires, let the caller add extra types via `f`, then run codegen
/// over it and return the generated interface as a string.
fn generate_runtime_interface_with_type_registry<F>(f: F) -> String
where
    F: Fn(&mut scale_info::Registry),
{
    // Stub types that codegen expects to find in every runtime's registry.
    #[derive(TypeInfo)]
    struct Runtime;
    #[derive(TypeInfo)]
    enum RuntimeCall {}
    #[derive(TypeInfo)]
    enum RuntimeEvent {}
    #[derive(TypeInfo)]
    pub enum DispatchError {}

    let mut registry = scale_info::Registry::new();

    // Keep the Runtime type's id: it becomes the metadata's top-level `ty`.
    let runtime_ty = registry.register_type(&meta_type::<Runtime>());
    for required in [
        meta_type::<RuntimeCall>(),
        meta_type::<RuntimeEvent>(),
        meta_type::<DispatchError>(),
    ] {
        registry.register_type(&required);
    }

    // Hook for the test to register its own types.
    f(&mut registry);

    // A trivial extrinsic description; enough for codegen to proceed.
    let extrinsic = ExtrinsicMetadata {
        ty: meta_type::<()>(),
        version: 0,
        signed_extensions: vec![],
    }
    .into_portable(&mut registry);

    let v15 = RuntimeMetadataV15 {
        types: registry.into(),
        pallets: Vec::new(),
        extrinsic,
        ty: runtime_ty,
        apis: vec![],
    };

    generate_runtime_interface_from_metadata(RuntimeMetadataPrefixed::from(v15))
}
/// Registering two distinct types that claim the same `scale_info` path must
/// not cause one to silently replace the other: codegen should keep both and
/// disambiguate the second one by suffixing (`DuplicateType2`).
#[test]
fn dupe_types_do_not_overwrite_each_other() {
    let interface = generate_runtime_interface_with_type_registry(|registry| {
        // Two genuinely different Rust types whose hand-written `TypeInfo`
        // impls report the identical path "dupe_mod::DuplicateType". We cannot
        // simply register one type twice: the registry dedupes identical ids.
        enum DupeA {}
        impl TypeInfo for DupeA {
            type Identity = Self;
            fn type_info() -> scale_info::Type {
                scale_info::Type::builder()
                    .path(scale_info::Path::new("DuplicateType", "dupe_mod"))
                    .variant(
                        scale_info::build::Variants::new()
                            .variant("FirstDupeTypeVariant", |builder| builder.index(0)),
                    )
            }
        }

        enum DupeB {}
        impl TypeInfo for DupeB {
            type Identity = Self;
            fn type_info() -> scale_info::Type {
                scale_info::Type::builder()
                    .path(scale_info::Path::new("DuplicateType", "dupe_mod"))
                    .variant(
                        scale_info::build::Variants::new()
                            .variant("SecondDupeTypeVariant", |builder| builder.index(0)),
                    )
            }
        }

        registry.register_type(&meta_type::<DupeA>());
        registry.register_type(&meta_type::<DupeB>());
    });

    // Both variants survive, and the name clash is resolved with a suffix.
    for expected in [
        "DuplicateType",
        "FirstDupeTypeVariant",
        "DuplicateType2",
        "SecondDupeTypeVariant",
    ] {
        assert!(interface.contains(expected));
    }
}
/// When two registered types share a path *and* carry generic parameters,
/// codegen emits only one definition: it assumes the generic arguments will
/// disambiguate uses, so no suffixed `DuplicateType2` should be produced.
#[test]
fn generic_types_overwrite_each_other() {
    let interface = generate_runtime_interface_with_type_registry(|registry| {
        // Two distinct Rust types with the same path and an identical generic
        // parameter list.
        enum GenericA {}
        impl TypeInfo for GenericA {
            type Identity = Self;
            fn type_info() -> scale_info::Type {
                scale_info::Type::builder()
                    .path(scale_info::Path::new("DuplicateType", "dupe_mod"))
                    .type_params([scale_info::TypeParameter::new("T", Some(meta_type::<u8>()))])
                    .variant(scale_info::build::Variants::new())
            }
        }

        enum GenericB {}
        impl TypeInfo for GenericB {
            type Identity = Self;
            fn type_info() -> scale_info::Type {
                scale_info::Type::builder()
                    .path(scale_info::Path::new("DuplicateType", "dupe_mod"))
                    .type_params([scale_info::TypeParameter::new("T", Some(meta_type::<u8>()))])
                    .variant(scale_info::build::Variants::new())
            }
        }

        registry.register_type(&meta_type::<GenericA>());
        registry.register_type(&meta_type::<GenericB>());
    });

    assert!(interface.contains("DuplicateType"));
    // We do _not_ expect this to exist, since a generic is present on the type:
    assert!(!interface.contains("DuplicateType2"));
}
/// Ensure codegen handles types with more than one generic parameter: the
/// generic definition and its concrete instantiations must both be emitted,
/// with parameters rendered positionally and paths fully qualified.
#[test]
fn more_than_1_generic_parameters_work() {
    #[allow(unused)]
    #[derive(TypeInfo)]
    struct Foo<T, U, V, W> {
        a: T,
        b: U,
        c: V,
        d: W,
    }

    #[allow(unused)]
    #[derive(TypeInfo)]
    struct Bar {
        p: Foo<u32, u32, u64, u128>,
        q: Foo<u8, u8, u8, u8>,
    }

    // Register only `Bar`; `Foo` is pulled into the registry transitively
    // through `Bar`'s fields.
    let mut registry = Registry::new();
    registry.register_type(&meta_type::<Bar>());
    let portable_types: PortableRegistry = registry.into();

    // NOTE(review): `CratePath` is not imported in the visible part of this
    // file — presumably `subxt_codegen::CratePath`; confirm the import exists
    // at the top of the file.
    let type_gen = subxt_codegen::TypeGenerator::new(
        &portable_types,
        "root",
        Default::default(),
        Default::default(),
        CratePath::default(),
        false,
    );
    let types = type_gen.generate_types_mod().unwrap();
    let generated_mod = quote::quote!( #types);

    // Expected output: modules nested along this test file's module path
    // (`integration_tests::codegen::codegen_tests`), generic parameters
    // rendered as positional `_0.._3`, primitives fully qualified.
    let expected_mod = quote::quote! {
        pub mod root {
            use super::root;
            pub mod integration_tests {
                use super::root;
                pub mod codegen {
                    use super::root;
                    pub mod codegen_tests {
                        use super::root;
                        pub struct Bar {
                            pub p: root::integration_tests::codegen::codegen_tests::Foo<
                                ::core::primitive::u32,
                                ::core::primitive::u32,
                                ::core::primitive::u64,
                                ::core::primitive::u128
                            >,
                            pub q: root::integration_tests::codegen::codegen_tests::Foo<
                                ::core::primitive::u8,
                                ::core::primitive::u8,
                                ::core::primitive::u8,
                                ::core::primitive::u8
                            >,
                        }
                        pub struct Foo<_0, _1, _2, _3> {
                            pub a: _0,
                            pub b: _1,
                            pub c: _2,
                            pub d: _3,
                        }
                    }
                }
            }
        }
    };

    // Token-stream comparison: whitespace-insensitive but token-exact.
    assert_eq!(generated_mod.to_string(), expected_mod.to_string());
}
@@ -4,7 +4,7 @@
use codec::Decode;
use regex::Regex;
use subxt_codegen::{ syn, CodegenBuilder };
use subxt_codegen::{syn, CodegenBuilder};
use subxt_metadata::Metadata;
fn load_test_metadata() -> Metadata {
@@ -14,5 +14,4 @@
#[allow(clippy::all)]
mod polkadot;
mod codegen_tests;
mod documentation;
File diff suppressed because one or more lines are too long
@@ -102,7 +102,6 @@ impl ContractsTestContext {
.find_first::<system::events::ExtrinsicSuccess>()?
.ok_or_else(|| Error::Other("Failed to find a ExtrinsicSuccess event".into()))?;
tracing::info!(" Block hash: {:?}", events.block_hash());
tracing::info!(" Code hash: {:?}", code_stored.code_hash);
tracing::info!(" Contract address: {:?}", instantiated.contract);
Ok((code_stored.code_hash, instantiated.contract))
@@ -2,15 +2,10 @@
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
#[cfg(test)]
mod blocks;
#[cfg(test)]
mod client;
#[cfg(test)]
mod codegen;
mod frame;
#[cfg(test)]
mod metadata;
#[cfg(test)]
mod runtime_api;
#[cfg(test)]
mod storage;
@@ -56,10 +56,6 @@ async fn storage_map_lookup() -> Result<(), subxt::Error> {
Ok(())
}
// This fails until the fix in https://github.com/paritytech/subxt/pull/458 is introduced.
// Here we create a key that looks a bit like a StorageNMap key, but should in fact be
// treated as a StorageKey (ie we should hash both values together with one hasher, rather
// than hash both values separately, or ignore the second value).
#[tokio::test]
async fn storage_n_mapish_key_is_properly_created() -> Result<(), subxt::Error> {
use codec::Encode;
@@ -73,18 +69,21 @@ async fn storage_n_mapish_key_is_properly_created() -> Result<(), subxt::Error>
.session()
.key_owner(KeyTypeId([1, 2, 3, 4]), [5u8, 6, 7, 8]);
let actual_key_bytes = api.storage().address_bytes(&actual_key)?;
// Let's manually hash to what we assume it should be and compare:
let expected_key_bytes = {
// Hash the prefix to the storage entry:
let mut bytes = sp_core::twox_128("Session".as_bytes()).to_vec();
bytes.extend(&sp_core::twox_128("KeyOwner".as_bytes())[..]);
// twox64_concat a *tuple* of the args expected:
let suffix = (KeyTypeId([1, 2, 3, 4]), vec![5u8, 6, 7, 8]).encode();
bytes.extend(sp_core::twox_64(&suffix));
bytes.extend(&suffix);
// Both keys, use twox64_concat hashers:
let key1 = KeyTypeId([1, 2, 3, 4]).encode();
let key2 = vec![5u8, 6, 7, 8].encode();
bytes.extend(sp_core::twox_64(&key1));
bytes.extend(&key1);
bytes.extend(sp_core::twox_64(&key2));
bytes.extend(&key2);
bytes
};
dbg!(&expected_key_bytes);
assert_eq!(actual_key_bytes, expected_key_bytes);
Ok(())
@@ -167,9 +166,9 @@ async fn storage_partial_lookup() -> Result<(), subxt::Error> {
let addr_bytes = api.storage().address_bytes(&addr)?;
let mut results = api.storage().at_latest().await?.iter(addr).await?;
let mut approvals = Vec::new();
while let Some(Ok((key, value))) = results.next().await {
assert!(key.starts_with(&addr_bytes));
approvals.push(value);
while let Some(Ok(kv)) = results.next().await {
assert!(kv.key_bytes.starts_with(&addr_bytes));
approvals.push(kv.value);
}
assert_eq!(approvals.len(), assets.len());
let mut amounts = approvals.iter().map(|a| a.amount).collect::<Vec<_>>();
@@ -188,9 +187,10 @@ async fn storage_partial_lookup() -> Result<(), subxt::Error> {
let mut results = api.storage().at_latest().await?.iter(addr).await?;
let mut approvals = Vec::new();
while let Some(Ok((key, value))) = results.next().await {
assert!(key.starts_with(&addr_bytes));
approvals.push(value);
while let Some(Ok(kv)) = results.next().await {
assert!(kv.key_bytes.starts_with(&addr_bytes));
assert!(kv.keys.decoded().is_ok());
approvals.push(kv.value);
}
assert_eq!(approvals.len(), 1);
assert_eq!(approvals[0].amount, amount);
@@ -29,13 +29,10 @@
use crate::utils::node_runtime;
use codec::Compact;
use subxt::{
client::{LightClient, LightClientBuilder, OnlineClientT},
config::PolkadotConfig,
};
use subxt::{client::OnlineClient, config::PolkadotConfig, lightclient::LightClient};
use subxt_metadata::Metadata;
type Client = LightClient<PolkadotConfig>;
type Client = OnlineClient<PolkadotConfig>;
// Check that we can subscribe to non-finalized blocks.
async fn non_finalized_headers_subscription(api: &Client) -> Result<(), subxt::Error> {
@@ -167,11 +164,13 @@ async fn dynamic_events(api: &Client) -> Result<(), subxt::Error> {
#[tokio::test]
async fn light_client_testing() -> Result<(), subxt::Error> {
tracing_subscriber::fmt::init();
let now = std::time::Instant::now();
let api: LightClient<PolkadotConfig> = LightClientBuilder::new()
.build_from_url("wss://rpc.polkadot.io:443")
.await?;
let chainspec = subxt::utils::fetch_chainspec_from_rpc_node("wss://rpc.polkadot.io:443")
.await
.unwrap();
let (_lc, rpc) = LightClient::relay_chain(chainspec.get())?;
let api = Client::from_rpc_client(rpc).await?;
println!("Light client initialization took {:?}\n", now.elapsed());
@@ -11,9 +11,6 @@ use subxt::{
Config, OnlineClient,
};
#[cfg(feature = "unstable-light-client")]
use subxt::client::{LightClient, LightClientBuilder};
/// Spawn a local substrate node for testing subxt.
pub struct TestNodeProcess<R: Config> {
// Keep a handle to the node; once it's dropped the node is killed.
@@ -24,12 +21,7 @@ pub struct TestNodeProcess<R: Config> {
legacy_client: RefCell<Option<OnlineClient<R>>>,
rpc_client: rpc::RpcClient,
#[cfg(not(feature = "unstable-light-client"))]
client: OnlineClient<R>,
#[cfg(feature = "unstable-light-client")]
client: LightClient<R>,
}
impl<R> TestNodeProcess<R>
@@ -92,16 +84,9 @@ where
/// will use the legacy backend by default or the unstable backend if the
/// "unstable-backend-client" feature is enabled, so that we can run each
/// test against both.
#[cfg(not(feature = "unstable-light-client"))]
pub fn client(&self) -> OnlineClient<R> {
self.client.clone()
}
/// Returns the subxt client connected to the running node.
#[cfg(feature = "unstable-light-client")]
pub fn client(&self) -> LightClient<R> {
self.client.clone()
}
}
/// Construct a test node process.
@@ -201,7 +186,7 @@ async fn build_rpc_client(ws_url: &str) -> Result<rpc::RpcClient, String> {
async fn build_legacy_client<T: Config>(
rpc_client: rpc::RpcClient,
) -> Result<OnlineClient<T>, String> {
let backend = legacy::LegacyBackend::new(rpc_client);
let backend = legacy::LegacyBackend::builder().build(rpc_client);
let client = OnlineClient::from_backend(Arc::new(backend))
.await
.map_err(|e| format!("Cannot construct OnlineClient from backend: {e}"))?;
@@ -235,28 +220,41 @@ async fn build_unstable_client<T: Config>(
}
#[cfg(feature = "unstable-light-client")]
async fn build_light_client<T: Config>(proc: &SubstrateNode) -> Result<LightClient<T>, String> {
async fn build_light_client<T: Config>(proc: &SubstrateNode) -> Result<OnlineClient<T>, String> {
use subxt::lightclient::{ChainConfig, LightClient};
// RPC endpoint.
let ws_url = format!("ws://127.0.0.1:{}", proc.ws_port());
// Step 1. Wait for a few blocks to be produced using the subxt client.
// Wait for a few blocks to be produced using the subxt client.
let client = OnlineClient::<T>::from_url(ws_url.clone())
.await
.map_err(|err| format!("Failed to connect to node rpc at {ws_url}: {err}"))?;
super::wait_for_blocks(&client).await;
// Step 2. Construct the light client.
// P2p bootnode.
// Now, configure a light client; fetch the chain spec and modify the bootnodes.
let bootnode = format!(
"/ip4/127.0.0.1/tcp/{}/p2p/{}",
proc.p2p_port(),
proc.p2p_address()
);
LightClientBuilder::new()
.bootnodes([bootnode.as_str()])
.build_from_url(ws_url.as_str())
let chain_spec = subxt::utils::fetch_chainspec_from_rpc_node(ws_url.as_str())
.await
.map_err(|e| format!("Failed to construct light client {}", e.to_string()))
.map_err(|e| format!("Failed to obtain chain spec from local machine: {e}"))?;
let chain_config = ChainConfig::chain_spec(chain_spec.get())
.set_bootnodes([bootnode.as_str()])
.map_err(|e| format!("Light client: cannot update boot nodes: {e}"))?;
// Instantiate the light client.
let (_lightclient, rpc) = LightClient::relay_chain(chain_config)
.map_err(|e| format!("Light client: cannot add relay chain: {e}"))?;
// Instantiate subxt client from this.
let api = OnlineClient::from_rpc_client(rpc)
.await
.map_err(|e| format!("Failed to build OnlineClient from light client RPC: {e}"))?;
Ok(api)
}