chainHead based backend implementation (#1161)

* add follow_stream impl

* follow_stream_unpin first draft

* add tests for follow_stream_unpin

* more tests and fixes for follow_stream_unpin

* first pass follow_stream_driver

* follow_stream_driver: add tests, fix things, buffer events from last finalized

* First pass finishing Backend impl

* Fix test compile issues

* clippy fixes

* clippy fix and consistify light_client

* revert lightclient tweak

* revert other lightclient thing

* cargo fmt

* start testing unstable backend behind feature flag

* more test fixes and move test-runtime metadata path just in case

* fix compile error

* ensure transaction progress stream actually used and fix another test

* cargo fmt

* CI tweak

* improve some comments and address some feedback bits

* update CI to use our own nightly binary

* wait for finalized block perhaps
This commit is contained in:
James Wilson
2023-09-26 16:58:30 +01:00
committed by GitHub
parent 00cce68371
commit cf7e2db1b7
43 changed files with 2682 additions and 250 deletions
@@ -5,30 +5,73 @@
use crate::{test_context, utils::node_runtime};
use codec::{Compact, Encode};
use futures::StreamExt;
use subxt::blocks::BlocksClient;
use subxt_metadata::Metadata;
use subxt_signer::sr25519::dev;
// Check that we can subscribe to non-finalized blocks.
#[tokio::test]
async fn non_finalized_headers_subscription() -> Result<(), subxt::Error> {
async fn block_subscriptions_are_consistent_with_eachother() -> Result<(), subxt::Error> {
let ctx = test_context().await;
let api = ctx.client();
let mut sub = api.blocks().subscribe_best().await?;
let mut all_sub = api.blocks().subscribe_all().await?;
let mut best_sub = api.blocks().subscribe_best().await?;
let mut finalized_sub = api.blocks().subscribe_finalized().await?;
// Wait for the next set of headers, and check that the
// associated block hash is the one we just finalized.
// (this can be a bit slow as we have to wait for finalization)
let header = sub.next().await.unwrap()?;
let block_hash = header.hash();
let current_block_hash = api.backend().latest_best_block_ref().await?.hash();
let mut finals = vec![];
let mut bests = vec![];
let mut alls = vec![];
// Finalization can run behind a bit; blocks that were reported a while ago can
// only just now be being finalized (in the new RPCs this isn't true and we'll be
// told about all of those blocks up front). So, first we wait until finalization reports
// a block that we've seen as new.
loop {
tokio::select! {biased;
Some(Ok(b)) = all_sub.next() => alls.push(b.hash()),
Some(Ok(b)) = best_sub.next() => bests.push(b.hash()),
Some(Ok(b)) = finalized_sub.next() => if alls.contains(&b.hash()) { break },
}
}
// Now, gather a couple more finalized blocks as well as anything else we hear about.
while finals.len() < 2 {
tokio::select! {biased;
Some(Ok(b)) = all_sub.next() => alls.push(b.hash()),
Some(Ok(b)) = best_sub.next() => bests.push(b.hash()),
Some(Ok(b)) = finalized_sub.next() => finals.push(b.hash()),
}
}
// Check that the items in the first slice are found in the same order in the second slice.
// Check that every item of `a_items` appears in `b_items`, in the same relative
// order — i.e. that `a_items` is an ordered subsequence of `b_items`.
//
// Returns `true` for an empty `a_items` (the empty sequence is a subsequence
// of anything).
fn are_same_order_in<T: PartialEq>(a_items: &[T], b_items: &[T]) -> bool {
    let mut b_idx = 0;
    for a in a_items {
        if let Some(idx) = b_items[b_idx..].iter().position(|b| a == b) {
            // Advance PAST the matched element so it cannot be matched again.
            // (`b_idx += idx` alone would let duplicate entries in `a_items`
            // all re-match one single element of `b_items`.)
            b_idx += idx + 1;
        } else {
            // `a` never occurs in the remainder of `b_items`, so the order
            // (or membership) requirement is violated.
            return false;
        }
    }
    true
}
// Final blocks and best blocks should both be subsets of _all_ of the blocks reported.
assert!(
are_same_order_in(&bests, &alls),
"Best set {bests:?} should be a subset of all: {alls:?}"
);
assert!(
are_same_order_in(&finals, &alls),
"Final set {finals:?} should be a subset of all: {alls:?}"
);
assert_eq!(block_hash, current_block_hash);
Ok(())
}
// Check that we can subscribe to finalized blocks.
#[tokio::test]
async fn finalized_headers_subscription() -> Result<(), subxt::Error> {
let ctx = test_context().await;
@@ -36,13 +79,13 @@ async fn finalized_headers_subscription() -> Result<(), subxt::Error> {
let mut sub = api.blocks().subscribe_finalized().await?;
// Wait for the next set of headers, and check that the
// associated block hash is the one we just finalized.
// (this can be a bit slow as we have to wait for finalization)
let header = sub.next().await.unwrap()?;
let finalized_hash = api.backend().latest_finalized_block_ref().await?.hash();
// check that the finalized block reported lines up with the `latest_finalized_block_ref`.
for _ in 0..2 {
let header = sub.next().await.unwrap()?;
let finalized_hash = api.backend().latest_finalized_block_ref().await?.hash();
assert_eq!(header.hash(), finalized_hash);
}
assert_eq!(header.hash(), finalized_hash);
Ok(())
}
@@ -117,17 +160,17 @@ async fn runtime_api_call() -> Result<(), subxt::Error> {
}
#[tokio::test]
async fn decode_extrinsics() {
async fn fetch_block_and_decode_extrinsic_details() {
let ctx = test_context().await;
let api = ctx.client();
let alice = dev::alice();
let bob = dev::bob();
// Generate a block that has unsigned and signed transactions.
// Setup; put an extrinsic into a block:
let tx = node_runtime::tx()
.balances()
.transfer(bob.public_key().into(), 10_000);
.transfer_allow_death(bob.public_key().into(), 10_000);
let signed_extrinsic = api
.tx()
@@ -143,19 +186,21 @@ async fn decode_extrinsics() {
.await
.unwrap();
// Now, separately, download that block. Let's see what it contains..
let block_hash = in_block.block_hash();
let block = BlocksClient::new(api).at(block_hash).await.unwrap();
let block = api.blocks().at(block_hash).await.unwrap();
let extrinsics = block.extrinsics().await.unwrap();
assert_eq!(extrinsics.len(), 2);
assert_eq!(extrinsics.block_hash(), block_hash);
// `.has` should work and find a transfer call.
assert!(extrinsics
.has::<node_runtime::balances::calls::types::Transfer>()
.has::<node_runtime::balances::calls::types::TransferAllowDeath>()
.unwrap());
// `.find_first` should similarly work to find the transfer call:
assert!(extrinsics
.find_first::<node_runtime::balances::calls::types::Transfer>()
.find_first::<node_runtime::balances::calls::types::TransferAllowDeath>()
.unwrap()
.is_some());
@@ -164,7 +209,7 @@ async fn decode_extrinsics() {
.map(|res| res.unwrap())
.collect::<Vec<_>>();
assert_eq!(block_extrinsics.len(), 2);
// All blocks contain a timestamp; check this first:
let timestamp = block_extrinsics.get(0).unwrap();
timestamp.as_root_extrinsic::<node_runtime::Call>().unwrap();
timestamp
@@ -172,9 +217,13 @@ async fn decode_extrinsics() {
.unwrap();
assert!(!timestamp.is_signed());
// Next we expect our transfer:
let tx = block_extrinsics.get(1).unwrap();
tx.as_root_extrinsic::<node_runtime::Call>().unwrap();
tx.as_extrinsic::<node_runtime::balances::calls::types::Transfer>()
let ext = tx
.as_extrinsic::<node_runtime::balances::calls::types::TransferAllowDeath>()
.unwrap()
.unwrap();
assert_eq!(ext.value, 10_000);
assert!(tx.is_signed());
}
@@ -72,7 +72,7 @@ async fn transaction_validation() {
let tx = node_runtime::tx()
.balances()
.transfer(bob.public_key().into(), 10_000);
.transfer_allow_death(bob.public_key().into(), 10_000);
let signed_extrinsic = api
.tx()
@@ -110,7 +110,7 @@ async fn validation_fails() {
// The actual TX is not important; the account has no funds to pay for it.
let tx = node_runtime::tx()
.balances()
.transfer(to.public_key().into(), 1);
.transfer_allow_death(to.public_key().into(), 1);
let signed_extrinsic = api
.tx()
@@ -232,7 +232,7 @@ async fn unsigned_extrinsic_is_same_shape_as_polkadotjs() {
let tx = node_runtime::tx()
.balances()
.transfer(dev::alice().public_key().into(), 12345000000000000);
.transfer_allow_death(dev::alice().public_key().into(), 12345000000000000);
let actual_tx = api.tx().create_unsigned(&tx).unwrap();
@@ -242,10 +242,10 @@ async fn unsigned_extrinsic_is_same_shape_as_polkadotjs() {
// - start local substrate node.
// - open polkadot.js UI in browser and point at local node.
// - open dev console (may need to refresh page now) and find the WS connection.
// - create a balances.transfer to ALICE with 12345 and "submit unsigned".
// - create a balances.transferAllowDeath to ALICE (doesn't matter who from) with 12345 and "submit unsigned".
// - find the submitAndWatchExtrinsic call in the WS connection to get these bytes:
let expected_tx_bytes = hex::decode(
"b004060700d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d0f0090c04bb6db2b"
"b004060000d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d0f0090c04bb6db2b"
)
.unwrap();
@@ -261,7 +261,7 @@ async fn extrinsic_hash_is_same_as_returned() {
let payload = node_runtime::tx()
.balances()
.transfer(dev::alice().public_key().into(), 12345000000000000);
.transfer_allow_death(dev::alice().public_key().into(), 12345000000000000);
let tx = api
.tx()
@@ -314,7 +314,7 @@ async fn partial_fee_estimate_correct() {
let bob = dev::bob();
let tx = node_runtime::tx()
.balances()
.transfer(bob.public_key().into(), 1_000_000_000_000);
.transfer_allow_death(bob.public_key().into(), 1_000_000_000_000);
let signed_extrinsic = api
.tx()
@@ -326,7 +326,7 @@ async fn partial_fee_estimate_correct() {
let partial_fee_1 = signed_extrinsic.partial_fee_estimate().await.unwrap();
// Method II: TransactionPaymentApi_query_fee_details + calculations
let latest_block_ref = api.backend().latest_best_block_ref().await.unwrap();
let latest_block_ref = api.backend().latest_finalized_block_ref().await.unwrap();
let len_bytes: [u8; 4] = (signed_extrinsic.encoded().len() as u32).to_le_bytes();
let encoded_with_len = [signed_extrinsic.encoded(), &len_bytes[..]].concat();
let InclusionFee {
@@ -8,8 +8,8 @@
use crate::{test_context, utils::node_runtime};
use assert_matches::assert_matches;
use codec::Encode;
use futures::Stream;
use subxt::{
backend::rpc::RpcSubscription,
backend::unstable::rpc_methods::{
FollowEvent, Initialized, MethodResponse, RuntimeEvent, RuntimeVersionEvent, StorageQuery,
StorageQueryType,
@@ -153,7 +153,7 @@ async fn chainhead_unstable_storage() {
event,
FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id &&
res.items.len() == 1 &&
res.items[0].key == format!("0x{}", hex::encode(addr_bytes))
res.items[0].key.0 == addr_bytes
);
let event = next_operation_event(&mut blocks).await;
@@ -218,8 +218,6 @@ async fn chainhead_unstable_unpin() {
assert!(rpc.chainhead_unstable_unpin(sub_id, hash).await.is_err());
}
// Ignored until this is implemented in Substrate
#[ignore]
#[tokio::test]
async fn chainspec_v1_genesishash() {
let ctx = test_context().await;
@@ -232,22 +230,18 @@ async fn chainspec_v1_genesishash() {
assert_eq!(a, b);
}
// Ignored until this is implemented in Substrate
#[ignore]
#[tokio::test]
async fn chainspec_v1_chainname() {
let ctx = test_context().await;
let old_rpc = ctx.legacy_rpc_methods().await;
let rpc = ctx.unstable_rpc_methods().await;
let a = old_rpc.system_name().await.unwrap();
let a = old_rpc.system_chain().await.unwrap();
let b = rpc.chainspec_v1_chain_name().await.unwrap();
assert_eq!(a, b);
}
// Ignored until this is implemented in Substrate
#[ignore]
#[tokio::test]
async fn chainspec_v1_properties() {
let ctx = test_context().await;
@@ -290,9 +284,14 @@ async fn transaction_unstable_submit_and_watch() {
}
/// Ignore block related events and obtain the next event related to an operation.
async fn next_operation_event<T: serde::de::DeserializeOwned>(
sub: &mut RpcSubscription<FollowEvent<T>>,
async fn next_operation_event<
T: serde::de::DeserializeOwned,
S: Unpin + Stream<Item = Result<FollowEvent<T>, subxt::Error>>,
>(
sub: &mut S,
) -> FollowEvent<T> {
use futures::StreamExt;
// Number of events to wait for the next operation event.
const NUM_EVENTS: usize = 10;
@@ -41,7 +41,9 @@ async fn tx_basic_transfer() -> Result<(), subxt::Error> {
.fetch_or_default(&bob_account_addr)
.await?;
let tx = node_runtime::tx().balances().transfer(bob_address, 10_000);
let tx = node_runtime::tx()
.balances()
.transfer_allow_death(bob_address, 10_000);
let events = api
.tx()
@@ -118,7 +120,7 @@ async fn tx_dynamic_transfer() -> Result<(), subxt::Error> {
let tx = subxt::dynamic::tx(
"Balances",
"transfer",
"transfer_allow_death",
vec![
Value::unnamed_variant(
"Id",
@@ -206,14 +208,14 @@ async fn tx_dynamic_transfer() -> Result<(), subxt::Error> {
}
#[tokio::test]
async fn multiple_transfers_work_nonce_incremented() -> Result<(), subxt::Error> {
async fn multiple_sequential_transfers_work() -> Result<(), subxt::Error> {
let alice = dev::alice();
let bob = dev::bob();
let bob_address: MultiAddress<AccountId32, u32> = bob.public_key().into();
let ctx = test_context().await;
let api = ctx.client();
let bob_account_addr = node_runtime::storage()
let bob_account_info_addr = node_runtime::storage()
.system()
.account(bob.public_key().to_account_id());
@@ -221,19 +223,19 @@ async fn multiple_transfers_work_nonce_incremented() -> Result<(), subxt::Error>
.storage()
.at_latest()
.await?
.fetch_or_default(&bob_account_addr)
.fetch_or_default(&bob_account_info_addr)
.await?;
// Do a transfer several times. If this works, it indicates that the
// nonce is properly incremented each time.
let tx = node_runtime::tx()
.balances()
.transfer(bob_address.clone(), 10_000);
.transfer_allow_death(bob_address.clone(), 10_000);
for _ in 0..3 {
api.tx()
.sign_and_submit_then_watch_default(&tx, &alice)
.await?
.wait_for_in_block() // Don't need to wait for finalization; this is quicker.
.await?
.wait_for_success()
.wait_for_finalized_success()
.await?;
}
@@ -241,7 +243,7 @@ async fn multiple_transfers_work_nonce_incremented() -> Result<(), subxt::Error>
.storage()
.at_latest()
.await?
.fetch_or_default(&bob_account_addr)
.fetch_or_default(&bob_account_info_addr)
.await?;
assert_eq!(bob_pre.data.free + 30_000, bob_post.data.free);
@@ -317,10 +319,10 @@ async fn transfer_error() {
let to_bob_tx = node_runtime::tx()
.balances()
.transfer(bob_address, 100_000_000_000_000_000);
.transfer_allow_death(bob_address, 100_000_000_000_000_000);
let to_alice_tx = node_runtime::tx()
.balances()
.transfer(alice_addr, 100_000_000_000_000_000);
.transfer_allow_death(alice_addr, 100_000_000_000_000_000);
api.tx()
.sign_and_submit_then_watch_default(&to_bob_tx, &alice)
@@ -360,7 +362,7 @@ async fn transfer_implicit_subscription() {
let to_bob_tx = node_runtime::tx()
.balances()
.transfer(bob.clone().into(), 10_000);
.transfer_allow_death(bob.clone().into(), 10_000);
let event = api
.tx()
@@ -203,7 +203,6 @@ async fn tx_call() {
let info_addr = node_runtime::storage()
.contracts()
.contract_info_of(&contract);
let info_addr_iter = node_runtime::storage().contracts().contract_info_of_iter();
let contract_info = cxt
.client()
@@ -213,7 +212,13 @@ async fn tx_call() {
.unwrap()
.fetch(&info_addr)
.await;
assert!(contract_info.is_ok());
assert!(
contract_info.is_ok(),
"Contract info is not ok, is: {contract_info:?}"
);
let info_addr_iter = node_runtime::storage().contracts().contract_info_of_iter();
let keys_and_values = cxt
.client()
@@ -23,7 +23,7 @@ async fn test_sudo() -> Result<(), subxt::Error> {
let alice = dev::alice();
let bob = dev::bob().public_key().into();
let call = Call::Balances(BalancesCall::transfer {
let call = Call::Balances(BalancesCall::transfer_allow_death {
dest: bob,
value: 10_000,
});
@@ -49,7 +49,7 @@ async fn test_sudo_unchecked_weight() -> Result<(), subxt::Error> {
let alice = dev::alice();
let bob = dev::bob().public_key().into();
let call = Call::Balances(BalancesCall::transfer {
let call = Call::Balances(BalancesCall::transfer_allow_death {
dest: bob,
value: 10_000,
});
@@ -59,7 +59,9 @@ async fn unchecked_extrinsic_encoding() -> Result<(), subxt::Error> {
let bob_address = bob.public_key().to_address();
// Construct a tx from Alice to Bob.
let tx = node_runtime::tx().balances().transfer(bob_address, 10_000);
let tx = node_runtime::tx()
.balances()
.transfer_allow_death(bob_address, 10_000);
let signed_extrinsic = api
.tx()