mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-05-06 10:18:02 +00:00
c1ed1901e1
* fix online/offline confusion * unified cache file * multi-threaded babyyy * checkpoint for niklas * compiles * all tests pass with --test-threads 1 * child-tree scrape is also multi-threaded now. * better thread splitting * some suggestions (#12532) * some suggestions * tokio multithread * move unused dependencies * snapshot command * fix rem * a bit of cleanup * support optional checks * fix * OCW command migrated to wasm-only, as an example * state-version management fully in remote-ext * almost everything move to wasm executor, some CLI flags reduced * follow-chain works as well * Master.into() * everything builds now * concurrent insertion and download for remote builds * minor fix * fix a bug * checkpoint * some updates * fmt * review comments * fmt * fix * fmt * update * fmt * rename * fix the damn UI tests * fmt * remoe the thread abstraction for the time being * cleanup * fix CI * fmt * fix * fix a few more things * tweak log levels * better error handling * address grumbles: use futures::mpsc * review comments * fmt * Apply suggestions from code review Co-authored-by: Bastian Köcher <git@kchr.de> * Update utils/frame/try-runtime/cli/src/lib.rs Co-authored-by: Bastian Köcher <git@kchr.de> * better api version stuff * some doc update * a whole lot of docs * fmt * fix all docs * fmt * rpc rebase: Try-runtime Revamp and Facelift (#12921) * Introduce sensible weight constants (#12868) * Introduce sensible weight constants * cargo fmt * Remove unused import * Add missing import * ".git/.scripts/bench-bot.sh" pallet dev pallet_lottery Co-authored-by: command-bot <> * Checkout to the branch HEAD explicitly in `build-linux-substrate` (#12876) * cli: Improve pruning documentation (#12819) * cli: Improve pruning documentation Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: Keep `finalized` notation and remove `canonical` one * cli: Fix cargo doc * cli: `PruningModeClap` IR enum Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: 
Convert PruningModeClap into pruning modes Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: Use `PruningModeClap` Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: Rename to `DatabasePruningMode` Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: Implement `FromStr` instead of `clap::ValueEnum` Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Update client/cli/src/params/pruning_params.rs Co-authored-by: Bastian Köcher <git@kchr.de> * Fix clippy Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: Add option documentation back Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Apply suggestions from code review Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> Co-authored-by: Bastian Köcher <git@kchr.de> * Revert "Move LockableCurrency trait to fungibles::Lockable and deprecate LockableCurrency (#12798)" (#12882) This reverts commit9a014d1ecd. * Don't indefinitely block on shutting down Tokio (#12885) * Don't indefinitely on shutting down Tokio Now we wait in maximum 60 seconds before we shutdown the node. Tasks are may be leaked and leading to some data corruption. 
* Drink less :thinking_face: * General Message Queue Pallet (#12485) * The message queue * Make fully generic * Refactor * Docs * Refactor * Use iter not slice * Per-origin queues * Multi-queue processing * Introduce MaxReady * Remove MaxReady in favour of ready ring * Cleanups * ReadyRing and tests * Stale page reaping * from_components -> from_parts Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Move WeightCounter to sp_weights Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add MockedWeightInfo Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Deploy to kitchensink Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Use WeightCounter Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Small fixes and logging Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add service_page Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Typo Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Move service_page below service_queue Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add service_message Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Use correct weight function Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Overweight execution * Refactor * Missing file * Fix WeightCounter usage in scheduler Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix peek_index Take into account that decoding from a mutable slice modifies it. 
Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add tests and bench service_page_item Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add debug_info Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add no-progress check to service_queues Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add more benches Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Bound from_message and try_append_message Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add PageReaped event Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Rename BookStateOf and BookStateFor Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update tests and remove logging Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove redundant per-message origins; add footprint() and sweep_queue() * Move testing stuff to mock.rs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add integration test Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix no-progress check Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix debug_info Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fixup merge and tests Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix footprint tracking * Introduce * Formatting * OverweightEnqueued event, auto-servicing config item * Update tests and benchmarks Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Clippy Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add tests Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Provide change handler * Add missing BookStateFor::insert and call QueueChangeHandler Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update benchmarks and weights Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * 
More tests... Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Use weight metering functions Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * weightInfo::process_message_payload is gone Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add defensive_saturating_accrue Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Rename WeightCounter to WeightMeter Ctr+Shift+H should do the trick. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Test on_initialize Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add module docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove origin from MaxMessageLen The message origin is not encoded into the heap and does therefore not influence the max message length anymore. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add BoundedVec::as_slice Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Test Page::{from_message, try_append_message} Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fixup docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Docs * Do nothing in sweep_queue if the queue does not exist ... otherwise it inserts default values into the storage. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Test ring (un)knitting Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Upgrade stress-test Change the test to not assume that all queued messages will be processed in the next block but split it over multiple. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * More tests... Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Beauty fixes Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * clippy Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Rename BoundedVec::as_slice to as_bounded_slice Conflicts with deref().as_slice() otherwise. 
Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix imports Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove ReadyRing struct Was used for testing only. Instead use 'fn assert_ring' which also check the service head and backlinks. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Beauty fixes Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix stale page watermark Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Cleanup Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix test feature and clippy Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * QueueChanged handler is called correctly Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update benches Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Abstract testing functions Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * More tests Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Cleanup Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Clippy Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * fmt Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Simplify tests Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Make stuff compile Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Extend overweight execution benchmark Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove TODOs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Test service queue with faulty MessageProcessor Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * fmt Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update pallet ui tests to 1.65 Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * More docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Review doc fixes Co-authored-by: Robert Klotzner 
<eskimor@users.noreply.github.com> Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add weight_limit to extrinsic weight of execute_overweight * Correctly return unused weight * Return actual weight consumed in do_execute_overweight * Review fixes Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Set version 7.0.0-dev Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Make it compile Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Switch message_size to u64 Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Switch message_count to u64 Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix benchmarks Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Make CI green Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Docs * Update tests Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * ".git/.scripts/bench-bot.sh" pallet dev pallet_message_queue * Dont mention README.md in the Cargo.toml Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove reference to readme Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Co-authored-by: parity-processbot <> Co-authored-by: Robert Klotzner <eskimor@users.noreply.github.com> Co-authored-by: Keith Yeung <kungfukeith11@gmail.com> * zombienet timings adjusted (#12890) * zombinet tests: add some timeout to allow net spin-up Sometimes tests are failing at first try, as the pods were not up yet. Adding timeout should allow the network to spin up properly. * initial timeout increased to 30s * Move import queue out of `sc-network` (#12764) * Move import queue out of `sc-network` Add supplementary asynchronous API for the import queue which means it can be run as an independent task and communicated with through the `ImportQueueService`. 
This commit removes removes block and justification imports from `sc-network` and provides `ChainSync` with a handle to import queue so it can import blocks and justifications. Polling of the import queue is moved complete out of `sc-network` and `sc_consensus::Link` is implemented for `ChainSyncInterfaceHandled` so the import queue can still influence the syncing process. * Fix tests * Apply review comments * Apply suggestions from code review Co-authored-by: Bastian Köcher <git@kchr.de> * Update client/network/sync/src/lib.rs Co-authored-by: Bastian Köcher <git@kchr.de> Co-authored-by: Bastian Köcher <git@kchr.de> * Trace response payload in default `jsonrpsee` middleware (#12886) * Trace result in default `jsonrpsee` middleware * `rpc_metrics::extra` Co-authored-by: Bastian Köcher <git@kchr.de> Co-authored-by: Bastian Köcher <git@kchr.de> * Ensure that we inform all tasks to stop before starting the 60 seconds shutdown (#12897) * Ensure that we inform all tasks to stop before starting the 60 seconds shutdown The change of waiting in maximum 60 seconds for the node to shutdown actually introduced a bug. We were actually waiting always 60 seconds as we didn't informed our tasks to shutdown. The solution to this problem is to drop the task manager as this will then inform all tasks to end. It also adds tests to ensure that the behaviors work as expected. (This should already have been done in the first pr! :() * ".git/.scripts/fmt.sh" 1 Co-authored-by: command-bot <> * Safe desired targets call (#12826) * checked call for desired targets * fix compile * fmt * fix tests * cleaner with and_then * Fix typo (#12900) * ValidateUnsigned: Improve docs. (#12870) * ValidateUnsigned: Improve docs. 
* Review comments * rpc server with HTTP/WS on the same socket (#12663) * jsonrpsee v0.16 add backwards compatibility run old http server on http only * cargo fmt * update jsonrpsee 0.16.1 * less verbose cors log * fix nit in log: WS -> HTTP * revert needless changes in Cargo.lock * remove unused features in tower * fix nits; add client-core feature * jsonrpsee v0.16.2 * `pallet-message-queue`: Fix license (#12895) * Fix license Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add mock doc Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Use explicit call indices (#12891) * frame-system: explicit call index Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Use explicit call indices Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * pallet-template: explicit call index Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * DNM: Temporarily require call_index Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Revert "DNM: Temporarily require call_index" This reverts commit c4934e312e12af72ca05a8029d7da753a9c99346. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Pin canonincalized block (#12902) * Remove implicit approval chilling upon slash. 
(#12420) * don't read slashing spans when taking election snapshot * update cargo.toml * bring back remote test * fix merge stuff * fix npos-voters function sig * remove as much redundant diff as you can * Update frame/staking/src/pallet/mod.rs Co-authored-by: Andronik <write@reusable.software> * fix * Update frame/staking/src/pallet/impls.rs * update lock * fix all tests * review comments * fmt * fix offence bench * clippy * ".git/.scripts/bench-bot.sh" pallet dev pallet_staking Co-authored-by: Andronik <write@reusable.software> Co-authored-by: Ankan <ankan.anurag@gmail.com> Co-authored-by: command-bot <> * bounties calls docs fix (#12909) Co-authored-by: parity-processbot <> * pallet-contracts migration pre-upgrade fix for v8 (#12905) * Only run pre-v8 migration check for versions older than 8 * Logix fix * use custom environment for publishing crates (#12912) * [contracts] Add debug buffer limit + enforcement (#12845) * Add debug buffer limit + enforcement Add debug buffer limit + enforcement * use BoundedVec for the debug buffer * revert schedule (debug buf len limit not needed anymore) * return DispatchError * addressed review comments * frame/remote-externalities: Fix clippy Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * frame/rpc: Add previous export Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Fixup some wrong dependencies (#12899) * Fixup some wrong dependencies Dev dependencies should not appear in the feature list. If features are required, they should be directly enabled for the `dev-dependency`. * More fixups * Fix fix * Remove deprecated feature * Make all work properly and nice!! 
* FMT * Fix formatting * add numerator and denominator to Rational128 Debug impl and increase precision of float representation (#12914) * Fix state-db pinning (#12927) * Pin all canonicalized blocks * Added a test * Docs * [ci] add job switcher (#12922) Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Co-authored-by: Keith Yeung <kungfukeith11@gmail.com> Co-authored-by: Vlad <vladimir@parity.io> Co-authored-by: Bastian Köcher <git@kchr.de> Co-authored-by: Anthony Alaribe <anthonyalaribe@gmail.com> Co-authored-by: Gavin Wood <gavin@parity.io> Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Co-authored-by: Robert Klotzner <eskimor@users.noreply.github.com> Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> Co-authored-by: tgmichel <telmo@purestake.com> Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> Co-authored-by: Luke Schoen <ltfschoen@users.noreply.github.com> Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com> Co-authored-by: Arkadiy Paronyan <arkady.paronyan@gmail.com> Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Andronik <write@reusable.software> Co-authored-by: Ankan <ankan.anurag@gmail.com> Co-authored-by: Muharem Ismailov <ismailov.m.h@gmail.com> Co-authored-by: Dino Pačandi <3002868+Dinonard@users.noreply.github.com> Co-authored-by: João Paulo Silva de Souza <77391175+joao-paulo-parity@users.noreply.github.com> Co-authored-by: Sasha Gryaznov <hi@agryaznov.com> Co-authored-by: Alexander Popiak <alexander.popiak@parity.io> Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> * Revert "rpc rebase: Try-runtime Revamp and Facelift (#12921)" This reverts commit 4ce770a9cb8daf1401529bda7d974b8c703f6b3e. 
* Lexnv/kiz revamp try runtime stuff (#12932) * Introduce sensible weight constants (#12868) * Introduce sensible weight constants * cargo fmt * Remove unused import * Add missing import * ".git/.scripts/bench-bot.sh" pallet dev pallet_lottery Co-authored-by: command-bot <> * Checkout to the branch HEAD explicitly in `build-linux-substrate` (#12876) * cli: Improve pruning documentation (#12819) * cli: Improve pruning documentation Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: Keep `finalized` notation and remove `canonical` one * cli: Fix cargo doc * cli: `PruningModeClap` IR enum Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: Convert PruningModeClap into pruning modes Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: Use `PruningModeClap` Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: Rename to `DatabasePruningMode` Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: Implement `FromStr` instead of `clap::ValueEnum` Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Update client/cli/src/params/pruning_params.rs Co-authored-by: Bastian Köcher <git@kchr.de> * Fix clippy Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * cli: Add option documentation back Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Apply suggestions from code review Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> Co-authored-by: Bastian Köcher <git@kchr.de> * Revert "Move LockableCurrency trait to fungibles::Lockable and deprecate LockableCurrency (#12798)" (#12882) This reverts commit9a014d1ecd. * Don't indefinitely block on shutting down Tokio (#12885) * Don't indefinitely on shutting down Tokio Now we wait in maximum 60 seconds before we shutdown the node. Tasks are may be leaked and leading to some data corruption. 
* Drink less :thinking_face: * General Message Queue Pallet (#12485) * The message queue * Make fully generic * Refactor * Docs * Refactor * Use iter not slice * Per-origin queues * Multi-queue processing * Introduce MaxReady * Remove MaxReady in favour of ready ring * Cleanups * ReadyRing and tests * Stale page reaping * from_components -> from_parts Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Move WeightCounter to sp_weights Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add MockedWeightInfo Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Deploy to kitchensink Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Use WeightCounter Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Small fixes and logging Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add service_page Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Typo Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Move service_page below service_queue Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add service_message Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Use correct weight function Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Overweight execution * Refactor * Missing file * Fix WeightCounter usage in scheduler Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix peek_index Take into account that decoding from a mutable slice modifies it. 
Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add tests and bench service_page_item Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add debug_info Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add no-progress check to service_queues Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add more benches Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Bound from_message and try_append_message Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add PageReaped event Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Rename BookStateOf and BookStateFor Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update tests and remove logging Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove redundant per-message origins; add footprint() and sweep_queue() * Move testing stuff to mock.rs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add integration test Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix no-progress check Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix debug_info Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fixup merge and tests Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix footprint tracking * Introduce * Formatting * OverweightEnqueued event, auto-servicing config item * Update tests and benchmarks Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Clippy Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add tests Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Provide change handler * Add missing BookStateFor::insert and call QueueChangeHandler Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update benchmarks and weights Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * 
More tests... Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Use weight metering functions Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * weightInfo::process_message_payload is gone Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add defensive_saturating_accrue Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Rename WeightCounter to WeightMeter Ctr+Shift+H should do the trick. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Test on_initialize Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add module docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove origin from MaxMessageLen The message origin is not encoded into the heap and does therefore not influence the max message length anymore. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add BoundedVec::as_slice Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Test Page::{from_message, try_append_message} Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fixup docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Docs * Do nothing in sweep_queue if the queue does not exist ... otherwise it inserts default values into the storage. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Test ring (un)knitting Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Upgrade stress-test Change the test to not assume that all queued messages will be processed in the next block but split it over multiple. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * More tests... Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Beauty fixes Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * clippy Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Rename BoundedVec::as_slice to as_bounded_slice Conflicts with deref().as_slice() otherwise. 
Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix imports Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove ReadyRing struct Was used for testing only. Instead use 'fn assert_ring' which also check the service head and backlinks. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Beauty fixes Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix stale page watermark Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Cleanup Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix test feature and clippy Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * QueueChanged handler is called correctly Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update benches Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Abstract testing functions Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * More tests Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Cleanup Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Clippy Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * fmt Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Simplify tests Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Make stuff compile Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Extend overweight execution benchmark Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove TODOs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Test service queue with faulty MessageProcessor Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * fmt Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update pallet ui tests to 1.65 Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * More docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Review doc fixes Co-authored-by: Robert Klotzner 
<eskimor@users.noreply.github.com> Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add weight_limit to extrinsic weight of execute_overweight * Correctly return unused weight * Return actual weight consumed in do_execute_overweight * Review fixes Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Set version 7.0.0-dev Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Make it compile Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Switch message_size to u64 Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Switch message_count to u64 Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix benchmarks Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Make CI green Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Docs * Update tests Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * ".git/.scripts/bench-bot.sh" pallet dev pallet_message_queue * Dont mention README.md in the Cargo.toml Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove reference to readme Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Co-authored-by: parity-processbot <> Co-authored-by: Robert Klotzner <eskimor@users.noreply.github.com> Co-authored-by: Keith Yeung <kungfukeith11@gmail.com> * zombienet timings adjusted (#12890) * zombinet tests: add some timeout to allow net spin-up Sometimes tests are failing at first try, as the pods were not up yet. Adding timeout should allow the network to spin up properly. * initial timeout increased to 30s * Move import queue out of `sc-network` (#12764) * Move import queue out of `sc-network` Add supplementary asynchronous API for the import queue which means it can be run as an independent task and communicated with through the `ImportQueueService`. 
This commit removes removes block and justification imports from `sc-network` and provides `ChainSync` with a handle to import queue so it can import blocks and justifications. Polling of the import queue is moved complete out of `sc-network` and `sc_consensus::Link` is implemented for `ChainSyncInterfaceHandled` so the import queue can still influence the syncing process. * Fix tests * Apply review comments * Apply suggestions from code review Co-authored-by: Bastian Köcher <git@kchr.de> * Update client/network/sync/src/lib.rs Co-authored-by: Bastian Köcher <git@kchr.de> Co-authored-by: Bastian Köcher <git@kchr.de> * Trace response payload in default `jsonrpsee` middleware (#12886) * Trace result in default `jsonrpsee` middleware * `rpc_metrics::extra` Co-authored-by: Bastian Köcher <git@kchr.de> Co-authored-by: Bastian Köcher <git@kchr.de> * Ensure that we inform all tasks to stop before starting the 60 seconds shutdown (#12897) * Ensure that we inform all tasks to stop before starting the 60 seconds shutdown The change of waiting in maximum 60 seconds for the node to shutdown actually introduced a bug. We were actually waiting always 60 seconds as we didn't informed our tasks to shutdown. The solution to this problem is to drop the task manager as this will then inform all tasks to end. It also adds tests to ensure that the behaviors work as expected. (This should already have been done in the first pr! :() * ".git/.scripts/fmt.sh" 1 Co-authored-by: command-bot <> * Safe desired targets call (#12826) * checked call for desired targets * fix compile * fmt * fix tests * cleaner with and_then * Fix typo (#12900) * ValidateUnsigned: Improve docs. (#12870) * ValidateUnsigned: Improve docs. 
* Review comments * rpc server with HTTP/WS on the same socket (#12663) * jsonrpsee v0.16 add backwards compatibility run old http server on http only * cargo fmt * update jsonrpsee 0.16.1 * less verbose cors log * fix nit in log: WS -> HTTP * revert needless changes in Cargo.lock * remove unused features in tower * fix nits; add client-core feature * jsonrpsee v0.16.2 * `pallet-message-queue`: Fix license (#12895) * Fix license Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add mock doc Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Use explicit call indices (#12891) * frame-system: explicit call index Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Use explicit call indices Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * pallet-template: explicit call index Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * DNM: Temporarily require call_index Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Revert "DNM: Temporarily require call_index" This reverts commit c4934e312e12af72ca05a8029d7da753a9c99346. Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Pin canonincalized block (#12902) * Remove implicit approval chilling upon slash. 
(#12420) * don't read slashing spans when taking election snapshot * update cargo.toml * bring back remote test * fix merge stuff * fix npos-voters function sig * remove as much redundant diff as you can * Update frame/staking/src/pallet/mod.rs Co-authored-by: Andronik <write@reusable.software> * fix * Update frame/staking/src/pallet/impls.rs * update lock * fix all tests * review comments * fmt * fix offence bench * clippy * ".git/.scripts/bench-bot.sh" pallet dev pallet_staking Co-authored-by: Andronik <write@reusable.software> Co-authored-by: Ankan <ankan.anurag@gmail.com> Co-authored-by: command-bot <> * bounties calls docs fix (#12909) Co-authored-by: parity-processbot <> * pallet-contracts migration pre-upgrade fix for v8 (#12905) * Only run pre-v8 migration check for versions older than 8 * Logix fix * use custom environment for publishing crates (#12912) * [contracts] Add debug buffer limit + enforcement (#12845) * Add debug buffer limit + enforcement Add debug buffer limit + enforcement * use BoundedVec for the debug buffer * revert schedule (debug buf len limit not needed anymore) * return DispatchError * addressed review comments * frame/remote-externalities: Fix clippy Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * frame/rpc: Add previous export Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> * Fixup some wrong dependencies (#12899) * Fixup some wrong dependencies Dev dependencies should not appear in the feature list. If features are required, they should be directly enabled for the `dev-dependency`. * More fixups * Fix fix * Remove deprecated feature * Make all work properly and nice!! 
* FMT * Fix formatting * add numerator and denominator to Rational128 Debug impl and increase precision of float representation (#12914) * Fix state-db pinning (#12927) * Pin all canonicalized blocks * Added a test * Docs * [ci] add job switcher (#12922) * Use LOG_TARGET in consensus related crates (#12875) * Use shared LOG_TARGET in consensus related crates * Rename target from "afg" to "grandpa" Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Co-authored-by: Keith Yeung <kungfukeith11@gmail.com> Co-authored-by: Vlad <vladimir@parity.io> Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Bastian Köcher <git@kchr.de> Co-authored-by: Anthony Alaribe <anthonyalaribe@gmail.com> Co-authored-by: Gavin Wood <gavin@parity.io> Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Co-authored-by: Robert Klotzner <eskimor@users.noreply.github.com> Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> Co-authored-by: tgmichel <telmo@purestake.com> Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> Co-authored-by: Luke Schoen <ltfschoen@users.noreply.github.com> Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com> Co-authored-by: Arkadiy Paronyan <arkady.paronyan@gmail.com> Co-authored-by: Andronik <write@reusable.software> Co-authored-by: Ankan <ankan.anurag@gmail.com> Co-authored-by: Muharem Ismailov <ismailov.m.h@gmail.com> Co-authored-by: Dino Pačandi <3002868+Dinonard@users.noreply.github.com> Co-authored-by: João Paulo Silva de Souza <77391175+joao-paulo-parity@users.noreply.github.com> Co-authored-by: Sasha Gryaznov <hi@agryaznov.com> Co-authored-by: Alexandru Vasile <alexandru.vasile@parity.io> Co-authored-by: Alexander Popiak <alexander.popiak@parity.io> Co-authored-by: Alexander Samusev 
<41779041+alvicsam@users.noreply.github.com> Co-authored-by: Davide Galassi <davxy@datawok.net> * Revert "Lexnv/kiz revamp try runtime stuff (#12932)" This reverts commit 378cfb26d984bcde467781f07ef8ddb6998212cb. * fmt * update * fix publish Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com> Co-authored-by: Bastian Köcher <git@kchr.de> Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Keith Yeung <kungfukeith11@gmail.com> Co-authored-by: Vlad <vladimir@parity.io> Co-authored-by: Anthony Alaribe <anthonyalaribe@gmail.com> Co-authored-by: Gavin Wood <gavin@parity.io> Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Co-authored-by: Robert Klotzner <eskimor@users.noreply.github.com> Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> Co-authored-by: tgmichel <telmo@purestake.com> Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> Co-authored-by: Luke Schoen <ltfschoen@users.noreply.github.com> Co-authored-by: Arkadiy Paronyan <arkady.paronyan@gmail.com> Co-authored-by: Andronik <write@reusable.software> Co-authored-by: Ankan <ankan.anurag@gmail.com> Co-authored-by: Muharem Ismailov <ismailov.m.h@gmail.com> Co-authored-by: Dino Pačandi <3002868+Dinonard@users.noreply.github.com> Co-authored-by: João Paulo Silva de Souza <77391175+joao-paulo-parity@users.noreply.github.com> Co-authored-by: Sasha Gryaznov <hi@agryaznov.com> Co-authored-by: Alexander Popiak <alexander.popiak@parity.io> Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Co-authored-by: Alexandru Vasile <alexandru.vasile@parity.io> Co-authored-by: Davide Galassi <davxy@datawok.net>
1387 lines
38 KiB
Rust
1387 lines
38 KiB
Rust
// This file is part of Substrate.
|
|
|
|
// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd.
|
|
// SPDX-License-Identifier: Apache-2.0
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
// You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
|
|
//! # Remote Externalities
|
|
//!
|
|
//! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate
|
|
//! based chain, or a local state snapshot file.
|
|
|
|
use codec::{Decode, Encode};
|
|
use futures::{channel::mpsc, stream::StreamExt};
|
|
use log::*;
|
|
use serde::de::DeserializeOwned;
|
|
use sp_core::{
|
|
hashing::twox_128,
|
|
hexdisplay::HexDisplay,
|
|
storage::{
|
|
well_known_keys::{is_default_child_storage_key, DEFAULT_CHILD_STORAGE_KEY_PREFIX},
|
|
ChildInfo, ChildType, PrefixedStorageKey, StorageData, StorageKey,
|
|
},
|
|
};
|
|
pub use sp_io::TestExternalities;
|
|
use sp_runtime::{traits::Block as BlockT, StateVersion};
|
|
use std::{
|
|
fs,
|
|
num::NonZeroUsize,
|
|
ops::{Deref, DerefMut},
|
|
path::{Path, PathBuf},
|
|
sync::Arc,
|
|
thread,
|
|
};
|
|
use substrate_rpc_client::{
|
|
rpc_params, ws_client, BatchRequestBuilder, ChainApi, ClientT, StateApi, WsClient,
|
|
};
|
|
|
|
/// A single raw key-value pair, exactly as fetched from the remote node.
type KeyValue = (StorageKey, StorageData);
/// All scraped top-trie (non-child) key-value pairs.
type TopKeyValues = Vec<KeyValue>;
/// All scraped child-trie key-value pairs, grouped per child trie.
type ChildKeyValues = Vec<(ChildInfo, Vec<KeyValue>)>;

/// Logging target used by all log lines in this crate.
const LOG_TARGET: &str = "remote-ext";
/// Endpoint used when the caller provides no transport of their own.
const DEFAULT_WS_ENDPOINT: &str = "wss://rpc.polkadot.io:443";
/// Number of storage values requested per RPC batch in the value-download phase.
const DEFAULT_VALUE_DOWNLOAD_BATCH: usize = 4096;
// NOTE: increasing this value does not seem to impact speed all that much.
/// Page size used with the paged `storage_keys_paged` RPC in the key-download phase.
const DEFAULT_KEY_DOWNLOAD_PAGE: u32 = 1000;
|
|
/// The snapshot that we store on disk.
///
/// Serialized with SCALE (`Encode`/`Decode`); field order is part of the on-disk
/// format and must not be changed.
#[derive(Decode, Encode)]
struct Snapshot<B: BlockT> {
	/// State version the data was scraped under (see `overwrite_state_version`).
	state_version: StateVersion,
	/// The block hash at which the state was scraped.
	block_hash: B::Hash,
	/// All scraped top-trie key-value pairs.
	top: TopKeyValues,
	/// All scraped child-trie key-value pairs, grouped per child trie.
	child: ChildKeyValues,
}
|
|
|
|
/// An externalities that acts exactly the same as [`sp_io::TestExternalities`] but has a few extra
/// bits and pieces to it, and can be loaded remotely.
pub struct RemoteExternalities<B: BlockT> {
	/// The inner externalities.
	pub inner_ext: TestExternalities,
	/// The block hash it which we created this externality env.
	pub block_hash: B::Hash,
}
|
|
|
|
// Delegate to the wrapped `TestExternalities`, so a `RemoteExternalities` can be used
// anywhere a `&TestExternalities` is expected.
impl<B: BlockT> Deref for RemoteExternalities<B> {
	type Target = TestExternalities;
	fn deref(&self) -> &Self::Target {
		&self.inner_ext
	}
}
|
|
|
|
// Mutable counterpart of the `Deref` impl above: forwards to the inner externalities.
impl<B: BlockT> DerefMut for RemoteExternalities<B> {
	fn deref_mut(&mut self) -> &mut Self::Target {
		&mut self.inner_ext
	}
}
|
|
|
|
/// The execution mode.
#[derive(Clone)]
pub enum Mode<B: BlockT> {
	/// Online. Potentially writes to a snapshot file.
	Online(OnlineConfig<B>),
	/// Offline. Uses a state snapshot file and needs not any client config.
	Offline(OfflineConfig),
	/// Prefer using a snapshot file if it exists, else use a remote server.
	OfflineOrElseOnline(OfflineConfig, OnlineConfig<B>),
}
|
|
|
|
/// By default, operate fully online with the default [`OnlineConfig`].
impl<B: BlockT> Default for Mode<B> {
	fn default() -> Self {
		Self::Online(Default::default())
	}
}
|
|
|
|
/// Configuration of the offline execution.
///
/// A state snapshot config must be present.
#[derive(Clone)]
pub struct OfflineConfig {
	/// The configuration of the state snapshot file to use. It must be present.
	pub state_snapshot: SnapshotConfig,
}
|
|
|
|
/// Description of the transport protocol (for online execution).
///
/// Starts out as [`Transport::Uri`]; `map_uri` upgrades it in place to
/// [`Transport::RemoteClient`] once the connection is established.
#[derive(Debug, Clone)]
pub enum Transport {
	/// Use the `URI` to open a new WebSocket connection.
	Uri(String),
	/// Use existing WebSocket connection.
	RemoteClient(Arc<WsClient>),
}
|
|
|
|
impl Transport {
|
|
fn as_client(&self) -> Option<&WsClient> {
|
|
match self {
|
|
Self::RemoteClient(client) => Some(client),
|
|
_ => None,
|
|
}
|
|
}
|
|
|
|
fn as_client_cloned(&self) -> Option<Arc<WsClient>> {
|
|
match self {
|
|
Self::RemoteClient(client) => Some(client.clone()),
|
|
_ => None,
|
|
}
|
|
}
|
|
|
|
// Open a new WebSocket connection if it's not connected.
|
|
async fn map_uri(&mut self) -> Result<(), &'static str> {
|
|
if let Self::Uri(uri) = self {
|
|
log::debug!(target: LOG_TARGET, "initializing remote client to {:?}", uri);
|
|
|
|
let ws_client = ws_client(uri).await.map_err(|e| {
|
|
log::error!(target: LOG_TARGET, "error: {:?}", e);
|
|
"failed to build ws client"
|
|
})?;
|
|
|
|
*self = Self::RemoteClient(Arc::new(ws_client))
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
}
|
|
|
|
impl From<String> for Transport {
|
|
fn from(uri: String) -> Self {
|
|
Transport::Uri(uri)
|
|
}
|
|
}
|
|
|
|
/// An already-established ws client can be handed over directly.
impl From<Arc<WsClient>> for Transport {
	fn from(client: Arc<WsClient>) -> Self {
		Self::RemoteClient(client)
	}
}
|
|
|
|
/// Configuration of the online execution.
///
/// A state snapshot config may be present and will be written to in that case.
#[derive(Clone)]
pub struct OnlineConfig<B: BlockT> {
	/// The block hash at which to get the runtime state. Will be latest finalized head if not
	/// provided.
	pub at: Option<B::Hash>,
	/// An optional state snapshot file to WRITE to, not for reading. Not written if set to `None`.
	pub state_snapshot: Option<SnapshotConfig>,
	/// The pallets to scrape. These values are hashed and added to `hashed_prefix`.
	pub pallets: Vec<String>,
	/// Transport config.
	pub transport: Transport,
	/// Lookout for child-keys, and scrape them as well if set to true.
	pub child_trie: bool,
	/// Storage entry key prefixes to be injected into the externalities. The *hashed* prefix must
	/// be given.
	pub hashed_prefixes: Vec<Vec<u8>>,
	/// Storage entry keys to be injected into the externalities. The *hashed* key must be given.
	pub hashed_keys: Vec<Vec<u8>>,
}
|
|
|
|
impl<B: BlockT> OnlineConfig<B> {
|
|
/// Return rpc (ws) client reference.
|
|
fn rpc_client(&self) -> &WsClient {
|
|
self.transport
|
|
.as_client()
|
|
.expect("ws client must have been initialized by now; qed.")
|
|
}
|
|
|
|
/// Return a cloned rpc (ws) client, suitable for being moved to threads.
|
|
fn rpc_client_cloned(&self) -> Arc<WsClient> {
|
|
self.transport
|
|
.as_client_cloned()
|
|
.expect("ws client must have been initialized by now; qed.")
|
|
}
|
|
|
|
fn at_expected(&self) -> B::Hash {
|
|
self.at.expect("block at must be initialized; qed")
|
|
}
|
|
}
|
|
|
|
impl<B: BlockT> Default for OnlineConfig<B> {
|
|
fn default() -> Self {
|
|
Self {
|
|
transport: Transport::from(DEFAULT_WS_ENDPOINT.to_owned()),
|
|
child_trie: true,
|
|
at: None,
|
|
state_snapshot: None,
|
|
pallets: Default::default(),
|
|
hashed_keys: Default::default(),
|
|
hashed_prefixes: Default::default(),
|
|
}
|
|
}
|
|
}
|
|
|
|
/// A plain string is treated as a ws URI, with everything else defaulted.
impl<B: BlockT> From<String> for OnlineConfig<B> {
	fn from(t: String) -> Self {
		let mut config = Self::default();
		config.transport = t.into();
		config
	}
}
|
|
|
|
/// Configuration of the state snapshot.
#[derive(Clone)]
pub struct SnapshotConfig {
	/// The path to the snapshot file.
	pub path: PathBuf,
}
|
|
|
|
impl SnapshotConfig {
|
|
pub fn new<P: Into<PathBuf>>(path: P) -> Self {
|
|
Self { path: path.into() }
|
|
}
|
|
}
|
|
|
|
impl From<String> for SnapshotConfig {
|
|
fn from(s: String) -> Self {
|
|
Self::new(s)
|
|
}
|
|
}
|
|
|
|
impl Default for SnapshotConfig {
|
|
fn default() -> Self {
|
|
Self { path: Path::new("SNAPSHOT").into() }
|
|
}
|
|
}
|
|
|
|
/// Builder for remote-externalities.
pub struct Builder<B: BlockT> {
	/// Custom key-pairs to be injected into the final externalities. The *hashed* keys and values
	/// must be given.
	hashed_key_values: Vec<KeyValue>,
	/// The keys that will be excluded from the final externality. The *hashed* key must be given.
	hashed_blacklist: Vec<Vec<u8>>,
	/// Connectivity mode, online or offline.
	mode: Mode<B>,
	/// If provided, overwrite the state version with this. Otherwise, the state_version of the
	/// remote node is used. All cache files also store their state version.
	///
	/// Overwrite only with care.
	overwrite_state_version: Option<StateVersion>,
}
|
|
|
|
// NOTE: ideally we would use `DefaultNoBound` here, but not worth bringing in frame-support for
|
|
// that.
|
|
impl<B: BlockT> Default for Builder<B> {
|
|
fn default() -> Self {
|
|
Self {
|
|
mode: Default::default(),
|
|
hashed_key_values: Default::default(),
|
|
hashed_blacklist: Default::default(),
|
|
overwrite_state_version: None,
|
|
}
|
|
}
|
|
}
|
|
|
|
// Mode methods
|
|
impl<B: BlockT> Builder<B> {
|
|
fn as_online(&self) -> &OnlineConfig<B> {
|
|
match &self.mode {
|
|
Mode::Online(config) => config,
|
|
Mode::OfflineOrElseOnline(_, config) => config,
|
|
_ => panic!("Unexpected mode: Online"),
|
|
}
|
|
}
|
|
|
|
fn as_online_mut(&mut self) -> &mut OnlineConfig<B> {
|
|
match &mut self.mode {
|
|
Mode::Online(config) => config,
|
|
Mode::OfflineOrElseOnline(_, config) => config,
|
|
_ => panic!("Unexpected mode: Online"),
|
|
}
|
|
}
|
|
}
|
|
|
|
// RPC methods
impl<B: BlockT> Builder<B>
where
	B::Hash: DeserializeOwned,
	B::Header: DeserializeOwned,
{
	/// Get the number of threads to use.
	///
	/// Uses the machine's available parallelism, falling back to 4 if it cannot be queried.
	fn threads() -> NonZeroUsize {
		thread::available_parallelism()
			.unwrap_or(NonZeroUsize::new(4usize).expect("4 is non-zero; qed"))
	}

	/// Fetch a single storage value at `key` (optionally at block `maybe_at`) via the
	/// `state_storage` RPC. Errors are logged and collapsed into a static message.
	async fn rpc_get_storage(
		&self,
		key: StorageKey,
		maybe_at: Option<B::Hash>,
	) -> Result<Option<StorageData>, &'static str> {
		trace!(target: LOG_TARGET, "rpc: get_storage");
		self.as_online().rpc_client().storage(key, maybe_at).await.map_err(|e| {
			error!(target: LOG_TARGET, "Error = {:?}", e);
			"rpc get_storage failed."
		})
	}

	/// Get the latest finalized head.
	async fn rpc_get_head(&self) -> Result<B::Hash, &'static str> {
		trace!(target: LOG_TARGET, "rpc: finalized_head");

		// sadly this pretty much unreadable...
		// (the turbofish pins the generic params of `ChainApi` that are irrelevant here)
		ChainApi::<(), _, B::Header, ()>::finalized_head(self.as_online().rpc_client())
			.await
			.map_err(|e| {
				error!(target: LOG_TARGET, "Error = {:?}", e);
				"rpc finalized_head failed."
			})
	}

	/// Get all the keys at `prefix` at `hash` using the paged, safe RPC methods.
	///
	/// Loops over `storage_keys_paged`, `DEFAULT_KEY_DOWNLOAD_PAGE` keys at a time, resuming
	/// from the last key of the previous page, until a short (non-full) page signals the end.
	async fn rpc_get_keys_paged(
		&self,
		prefix: StorageKey,
		at: B::Hash,
	) -> Result<Vec<StorageKey>, &'static str> {
		let mut last_key: Option<StorageKey> = None;
		let mut all_keys: Vec<StorageKey> = vec![];
		let keys = loop {
			let page = self
				.as_online()
				.rpc_client()
				.storage_keys_paged(
					Some(prefix.clone()),
					DEFAULT_KEY_DOWNLOAD_PAGE,
					// resume after the last key seen so far; `None` on the first iteration.
					last_key.clone(),
					Some(at),
				)
				.await
				.map_err(|e| {
					error!(target: LOG_TARGET, "Error = {:?}", e);
					"rpc get_keys failed"
				})?;
			let page_len = page.len();

			all_keys.extend(page);

			// A page shorter than the requested page size means we have drained the prefix.
			if page_len < DEFAULT_KEY_DOWNLOAD_PAGE as usize {
				log::debug!(target: LOG_TARGET, "last page received: {}", page_len);
				break all_keys
			} else {
				let new_last_key =
					all_keys.last().expect("all_keys is populated; has .last(); qed");
				log::debug!(
					target: LOG_TARGET,
					"new total = {}, full page received: {}",
					all_keys.len(),
					HexDisplay::from(new_last_key)
				);
				last_key = Some(new_last_key.clone());
			}
		};

		Ok(keys)
	}

	/// Synonym of `getPairs` that uses paged queries to first get the keys, and then
	/// map them to values one by one.
	///
	/// This can work with public nodes. But, expect it to be darn slow.
	///
	/// Concurrency model: the key list is split evenly across OS threads, each running its own
	/// tokio runtime and issuing batched `state_getStorage` requests. Workers stream results
	/// back over an unbounded channel; this (async) caller inserts them into `pending_ext` as
	/// they arrive and also returns the full list for snapshot writing.
	pub(crate) async fn rpc_get_pairs_paged(
		&self,
		prefix: StorageKey,
		at: B::Hash,
		pending_ext: &mut TestExternalities,
	) -> Result<Vec<KeyValue>, &'static str> {
		let keys = self.rpc_get_keys_paged(prefix.clone(), at).await?;
		if keys.is_empty() {
			return Ok(Default::default())
		}

		let client = self.as_online().rpc_client_cloned();
		let threads = Self::threads().get();
		// div-ceil, so the last chunk may be smaller but no key is dropped.
		let thread_chunk_size = (keys.len() + threads - 1) / threads;

		log::info!(
			target: LOG_TARGET,
			"Querying a total of {} keys from prefix {:?}, splitting among {} threads, {} keys per thread",
			keys.len(),
			HexDisplay::from(&prefix),
			threads,
			thread_chunk_size,
		);

		let mut handles = Vec::new();
		let keys_chunked: Vec<Vec<StorageKey>> =
			keys.chunks(thread_chunk_size).map(|s| s.into()).collect::<Vec<_>>();

		/// Messages sent from worker threads back to this (main) task.
		enum Message {
			/// This thread completed the assigned work.
			Terminated,
			/// The thread produced the following batch response.
			Batch(Vec<(Vec<u8>, Vec<u8>)>),
			/// A request from the batch failed.
			BatchFailed(String),
		}

		let (tx, mut rx) = mpsc::unbounded::<Message>();

		for thread_keys in keys_chunked {
			let thread_client = client.clone();
			let thread_sender = tx.clone();
			let handle = std::thread::spawn(move || {
				// Each OS thread drives its own single-purpose tokio runtime, since the
				// shared ws client is async but we are on a plain thread here.
				let rt = tokio::runtime::Runtime::new().unwrap();
				let mut thread_key_values = Vec::with_capacity(thread_keys.len());

				for chunk_keys in thread_keys.chunks(DEFAULT_VALUE_DOWNLOAD_BATCH) {
					let mut batch = BatchRequestBuilder::new();

					for key in chunk_keys.iter() {
						batch
							.insert("state_getStorage", rpc_params![key, at])
							.map_err(|_| "Invalid batch params")
							.unwrap();
					}

					// NOTE(review): a transport-level batch failure panics this worker via
					// `unwrap()` rather than flowing through `Message::BatchFailed`; only
					// per-key errors inside a successful batch use the channel. Presumably
					// intentional (fail-fast), but worth confirming.
					let batch_response = rt
						.block_on(thread_client.batch_request::<Option<StorageData>>(batch))
						.map_err(|e| {
							log::error!(
								target: LOG_TARGET,
								"failed to execute batch: {:?}. Error: {:?}",
								chunk_keys.iter().map(HexDisplay::from).collect::<Vec<_>>(),
								e
							);
							"batch failed."
						})
						.unwrap();

					// Check if we got responses for all submitted requests.
					assert_eq!(chunk_keys.len(), batch_response.len());

					let mut batch_kv = Vec::with_capacity(chunk_keys.len());
					for (key, maybe_value) in chunk_keys.into_iter().zip(batch_response) {
						match maybe_value {
							Ok(Some(data)) => {
								thread_key_values.push((key.clone(), data.clone()));
								batch_kv.push((key.clone().0, data.0));
							},
							Ok(None) => {
								// A key reported by `storage_keys_paged` but with no value;
								// recorded as an empty value rather than skipped.
								log::warn!(
									target: LOG_TARGET,
									"key {:?} had none corresponding value.",
									&key
								);
								let data = StorageData(vec![]);
								thread_key_values.push((key.clone(), data.clone()));
								batch_kv.push((key.clone().0, data.0));
							},
							Err(e) => {
								let reason = format!("key {:?} failed: {:?}", &key, e);
								log::error!(target: LOG_TARGET, "Reason: {}", reason);
								// Signal failures to the main thread, stop aggregating (key, value)
								// pairs and return immediately an error.
								thread_sender.unbounded_send(Message::BatchFailed(reason)).unwrap();
								return Default::default()
							},
						};

						// Progress log roughly every 10% of this thread's keys.
						if thread_key_values.len() % (thread_keys.len() / 10).max(1) == 0 {
							let ratio: f64 =
								thread_key_values.len() as f64 / thread_keys.len() as f64;
							log::debug!(
								target: LOG_TARGET,
								"[thread = {:?}] progress = {:.2} [{} / {}]",
								std::thread::current().id(),
								ratio,
								thread_key_values.len(),
								thread_keys.len(),
							);
						}
					}

					// Send this batch to the main thread to start inserting.
					thread_sender.unbounded_send(Message::Batch(batch_kv)).unwrap();
				}

				thread_sender.unbounded_send(Message::Terminated).unwrap();
				thread_key_values
			});

			handles.push(handle);
		}

		// first, wait until all threads send a `Terminated` message, in the meantime populate
		// `pending_ext`.
		let mut terminated = 0usize;
		let mut batch_failed = false;
		loop {
			match rx.next().await.unwrap() {
				Message::Batch(kv) => {
					for (k, v) in kv {
						// skip writing the child root data.
						if is_default_child_storage_key(k.as_ref()) {
							continue
						}
						pending_ext.insert(k, v);
					}
				},
				Message::BatchFailed(error) => {
					log::error!(target: LOG_TARGET, "Batch processing failed: {:?}", error);
					batch_failed = true;
					break
				},
				Message::Terminated => {
					terminated += 1;
					if terminated == handles.len() {
						break
					}
				},
			}
		}

		// Ensure all threads finished execution before returning.
		let keys_and_values =
			handles.into_iter().flat_map(|h| h.join().unwrap()).collect::<Vec<_>>();

		if batch_failed {
			return Err("Batch failed.")
		}

		Ok(keys_and_values)
	}

	/// Get the values corresponding to `child_keys` at the given `prefixed_top_key`.
	///
	/// Issues batched `childstate_getStorage` requests, `DEFAULT_VALUE_DOWNLOAD_BATCH` keys at
	/// a time. Per-key errors are logged, and any single failure makes the whole call fail
	/// after the loop completes.
	pub(crate) async fn rpc_child_get_storage_paged(
		client: &WsClient,
		prefixed_top_key: &StorageKey,
		child_keys: Vec<StorageKey>,
		at: B::Hash,
	) -> Result<Vec<KeyValue>, &'static str> {
		let mut child_kv_inner = vec![];
		let mut batch_success = true;

		for batch_child_key in child_keys.chunks(DEFAULT_VALUE_DOWNLOAD_BATCH) {
			let mut batch_request = BatchRequestBuilder::new();

			for key in batch_child_key {
				batch_request
					.insert(
						"childstate_getStorage",
						rpc_params![
							PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()),
							key,
							at
						],
					)
					.map_err(|_| "Invalid batch params")?;
			}

			let batch_response =
				client.batch_request::<Option<StorageData>>(batch_request).await.map_err(|e| {
					log::error!(
						target: LOG_TARGET,
						"failed to execute batch: {:?}. Error: {:?}",
						batch_child_key,
						e
					);
					"batch failed."
				})?;

			// Sanity: one response per submitted request.
			assert_eq!(batch_child_key.len(), batch_response.len());

			for (key, maybe_value) in batch_child_key.iter().zip(batch_response) {
				match maybe_value {
					Ok(Some(v)) => {
						child_kv_inner.push((key.clone(), v));
					},
					Ok(None) => {
						// Missing value is recorded as empty, mirroring the top-trie path.
						log::warn!(
							target: LOG_TARGET,
							"key {:?} had none corresponding value.",
							&key
						);
						child_kv_inner.push((key.clone(), StorageData(vec![])));
					},
					Err(e) => {
						// Keep going so all errors are logged, but remember the failure.
						log::error!(target: LOG_TARGET, "key {:?} failed: {:?}", &key, e);
						batch_success = false;
					},
				};
			}
		}

		if batch_success {
			Ok(child_kv_inner)
		} else {
			Err("batch failed.")
		}
	}

	/// Fetch all child-trie keys under `prefixed_top_key` (filtered by `child_prefix`) at
	/// block `at`, via the deprecated `childstate_getKeys` RPC.
	pub(crate) async fn rpc_child_get_keys(
		client: &WsClient,
		prefixed_top_key: &StorageKey,
		child_prefix: StorageKey,
		at: B::Hash,
	) -> Result<Vec<StorageKey>, &'static str> {
		// This is deprecated and will generate a warning which causes the CI to fail.
		#[allow(warnings)]
		let child_keys = substrate_rpc_client::ChildStateApi::storage_keys(
			client,
			PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()),
			child_prefix,
			Some(at),
		)
		.await
		.map_err(|e| {
			error!(target: LOG_TARGET, "Error = {:?}", e);
			"rpc child_get_keys failed."
		})?;

		debug!(
			target: LOG_TARGET,
			"[thread = {:?}] scraped {} child-keys of the child-bearing top key: {}",
			std::thread::current().id(),
			child_keys.len(),
			HexDisplay::from(prefixed_top_key)
		);

		Ok(child_keys)
	}
}
|
|
|
|
impl<B: BlockT + DeserializeOwned> Builder<B>
|
|
where
|
|
B::Hash: DeserializeOwned,
|
|
B::Header: DeserializeOwned,
|
|
{
|
|
	/// Load all of the child keys from the remote config, given the already scraped list of top key
	/// pairs.
	///
	/// `top_kv` need not be only child-bearing top keys. It should be all of the top keys that are
	/// included thus far.
	///
	/// This function concurrently populates `pending_ext`. the return value is only for writing to
	/// cache, we can also optimize further.
	async fn load_child_remote(
		&self,
		top_kv: &[KeyValue],
		pending_ext: &mut TestExternalities,
	) -> Result<ChildKeyValues, &'static str> {
		// Only the default-child-storage top keys are child-trie roots worth scraping.
		let child_roots = top_kv
			.into_iter()
			.filter_map(|(k, _)| is_default_child_storage_key(k.as_ref()).then(|| k.clone()))
			.collect::<Vec<_>>();

		if child_roots.is_empty() {
			return Ok(Default::default())
		}

		// div-ceil simulation.
		let threads = Self::threads().get();
		let child_roots_per_thread = (child_roots.len() + threads - 1) / threads;

		info!(
			target: LOG_TARGET,
			"👩👦 scraping child-tree data from {} top keys, split among {} threads, {} top keys per thread",
			child_roots.len(),
			threads,
			child_roots_per_thread,
		);

		// NOTE: the threading done here is the simpler, yet slightly un-elegant because we are
		// splitting child root among threads, and it is very common for these root to have vastly
		// different child tries underneath them, causing some threads to finish way faster than
		// others. Certainly still better than single thread though.
		let mut handles = vec![];
		let client = self.as_online().rpc_client_cloned();
		let at = self.as_online().at_expected();

		/// Messages sent from worker threads back to this task.
		enum Message {
			/// Worker finished its share of the child roots.
			Terminated,
			/// One fully-scraped child trie: its info plus raw (key, value) pairs.
			Batch((ChildInfo, Vec<(Vec<u8>, Vec<u8>)>)),
		}
		let (tx, mut rx) = mpsc::unbounded::<Message>();

		for thread_child_roots in child_roots
			.chunks(child_roots_per_thread)
			.map(|x| x.into())
			.collect::<Vec<Vec<_>>>()
		{
			let thread_client = client.clone();
			let thread_sender = tx.clone();
			let handle = thread::spawn(move || {
				// Dedicated runtime per worker thread to drive the async RPC calls.
				let rt = tokio::runtime::Runtime::new().unwrap();
				let mut thread_child_kv = vec![];
				for prefixed_top_key in thread_child_roots {
					// First all keys of this child trie, then their values, in batches.
					let child_keys = rt.block_on(Self::rpc_child_get_keys(
						&thread_client,
						&prefixed_top_key,
						StorageKey(vec![]),
						at,
					))?;
					let child_kv_inner = rt.block_on(Self::rpc_child_get_storage_paged(
						&thread_client,
						&prefixed_top_key,
						child_keys,
						at,
					))?;

					// Strip the child-storage prefix to recover the raw child-trie id.
					let prefixed_top_key = PrefixedStorageKey::new(prefixed_top_key.clone().0);
					let un_prefixed = match ChildType::from_prefixed_key(&prefixed_top_key) {
						Some((ChildType::ParentKeyId, storage_key)) => storage_key,
						None => {
							log::error!(target: LOG_TARGET, "invalid key: {:?}", prefixed_top_key);
							return Err("Invalid child key")
						},
					};

					// Stream this child trie to the main task for immediate insertion...
					thread_sender
						.unbounded_send(Message::Batch((
							ChildInfo::new_default(un_prefixed),
							child_kv_inner
								.iter()
								.cloned()
								.map(|(k, v)| (k.0, v.0))
								.collect::<Vec<_>>(),
						)))
						.unwrap();
					// ...and also keep a copy for the (snapshot) return value.
					thread_child_kv.push((ChildInfo::new_default(un_prefixed), child_kv_inner));
				}

				thread_sender.unbounded_send(Message::Terminated).unwrap();
				Ok(thread_child_kv)
			});
			handles.push(handle);
		}

		// first, wait until all threads send a `Terminated` message, in the meantime populate
		// `pending_ext`.
		let mut terminated = 0usize;
		loop {
			match rx.next().await.unwrap() {
				Message::Batch((info, kvs)) =>
					for (k, v) in kvs {
						pending_ext.insert_child(info.clone(), k, v);
					},
				Message::Terminated => {
					terminated += 1;
					if terminated == handles.len() {
						break
					}
				},
			}
		}

		// Join all workers; propagate any worker panic, and flatten their per-trie results.
		let child_kv = handles
			.into_iter()
			.flat_map(|h| h.join().unwrap())
			.flatten()
			.collect::<Vec<_>>();
		Ok(child_kv)
	}
|
|
|
|
	/// Build `Self` from a network node denoted by `uri`.
	///
	/// This function concurrently populates `pending_ext`. the return value is only for writing to
	/// cache, we can also optimize further.
	async fn load_top_remote(
		&self,
		pending_ext: &mut TestExternalities,
	) -> Result<TopKeyValues, &'static str> {
		let config = self.as_online();
		let at = self
			.as_online()
			.at
			.expect("online config must be initialized by this point; qed.");
		log::info!(target: LOG_TARGET, "scraping key-pairs from remote at block height {:?}", at);

		let mut keys_and_values = Vec::new();
		// Scrape every configured hashed prefix; `rpc_get_pairs_paged` inserts into
		// `pending_ext` as it goes, so here we only accumulate the snapshot copy.
		for prefix in &config.hashed_prefixes {
			let now = std::time::Instant::now();
			let additional_key_values =
				self.rpc_get_pairs_paged(StorageKey(prefix.to_vec()), at, pending_ext).await?;
			let elapsed = now.elapsed();
			log::info!(
				target: LOG_TARGET,
				"adding data for hashed prefix: {:?}, took {:?}s",
				HexDisplay::from(prefix),
				elapsed.as_secs()
			);
			keys_and_values.extend(additional_key_values);
		}

		// Then fetch the individually-requested hashed keys, one RPC each.
		for key in &config.hashed_keys {
			let key = StorageKey(key.to_vec());
			log::info!(
				target: LOG_TARGET,
				"adding data for hashed key: {:?}",
				HexDisplay::from(&key)
			);
			match self.rpc_get_storage(key.clone(), Some(at)).await? {
				Some(value) => {
					pending_ext.insert(key.clone().0, value.clone().0);
					keys_and_values.push((key, value));
				},
				None => {
					// Missing explicit keys are skipped (with a warning), not stored empty.
					log::warn!(
						target: LOG_TARGET,
						"no data found for hashed key: {:?}",
						HexDisplay::from(&key)
					);
				},
			}
		}

		Ok(keys_and_values)
	}
|
|
|
|
	/// The entry point of execution, if `mode` is online.
	///
	/// initializes the remote client in `transport`, and sets the `at` field, if not specified.
	async fn init_remote_client(&mut self) -> Result<(), &'static str> {
		// First, initialize the ws client.
		self.as_online_mut().transport.map_uri().await?;

		// Then, if `at` is not set, set it.
		if self.as_online().at.is_none() {
			let at = self.rpc_get_head().await?;
			log::info!(
				target: LOG_TARGET,
				"since no at is provided, setting it to latest finalized head, {:?}",
				at
			);
			self.as_online_mut().at = Some(at);
		}

		// Then, a few transformation that we want to perform in the online config:
		// pallet names are twox_128-hashed into prefixes, matching FRAME's storage layout.
		let online_config = self.as_online_mut();
		online_config
			.pallets
			.iter()
			.for_each(|p| online_config.hashed_prefixes.push(twox_128(p.as_bytes()).to_vec()));

		if online_config.child_trie {
			online_config.hashed_prefixes.push(DEFAULT_CHILD_STORAGE_KEY_PREFIX.to_vec());
		}

		// Finally, if by now, we have NOT put any limitations on prefixes that we are interested
		// in, we download everything (the empty prefix matches every key). The child-storage
		// prefix is ignored when counting, since it is added unconditionally above.
		if online_config
			.hashed_prefixes
			.iter()
			.filter(|p| *p != DEFAULT_CHILD_STORAGE_KEY_PREFIX)
			.count() == 0
		{
			log::info!(
				target: LOG_TARGET,
				"since no prefix is filtered, the data for all pallets will be downloaded"
			);
			online_config.hashed_prefixes.push(vec![]);
		}

		Ok(())
	}
|
|
|
|
/// Load the data from a remote server. The main code path is calling into `load_top_remote` and
|
|
/// `load_child_remote`.
|
|
///
|
|
/// Must be called after `init_remote_client`.
|
|
async fn load_remote_and_maybe_save(&mut self) -> Result<TestExternalities, &'static str> {
|
|
let state_version =
|
|
StateApi::<B::Hash>::runtime_version(self.as_online().rpc_client(), None)
|
|
.await
|
|
.map_err(|e| {
|
|
error!(target: LOG_TARGET, "Error = {:?}", e);
|
|
"rpc runtime_version failed."
|
|
})
|
|
.map(|v| v.state_version())?;
|
|
let mut pending_ext = TestExternalities::new_with_code_and_state(
|
|
Default::default(),
|
|
Default::default(),
|
|
self.overwrite_state_version.unwrap_or(state_version),
|
|
);
|
|
let top_kv = self.load_top_remote(&mut pending_ext).await?;
|
|
let child_kv = self.load_child_remote(&top_kv, &mut pending_ext).await?;
|
|
|
|
if let Some(path) = self.as_online().state_snapshot.clone().map(|c| c.path) {
|
|
let snapshot = Snapshot::<B> {
|
|
state_version,
|
|
top: top_kv,
|
|
child: child_kv,
|
|
block_hash: self
|
|
.as_online()
|
|
.at
|
|
.expect("set to `Some` in `init_remote_client`; must be called before; qed"),
|
|
};
|
|
let encoded = snapshot.encode();
|
|
log::info!(
|
|
target: LOG_TARGET,
|
|
"writing snapshot of {} bytes to {:?}",
|
|
encoded.len(),
|
|
path
|
|
);
|
|
std::fs::write(path, encoded).map_err(|_| "fs::write failed")?;
|
|
}
|
|
|
|
Ok(pending_ext)
|
|
}
|
|
|
|
fn load_snapshot(&mut self, path: PathBuf) -> Result<Snapshot<B>, &'static str> {
|
|
info!(target: LOG_TARGET, "loading data from snapshot {:?}", path);
|
|
let bytes = fs::read(path).map_err(|_| "fs::read failed.")?;
|
|
Decode::decode(&mut &*bytes).map_err(|_| "decode failed")
|
|
}
|
|
|
|
async fn do_load_remote(&mut self) -> Result<RemoteExternalities<B>, &'static str> {
|
|
self.init_remote_client().await?;
|
|
let block_hash = self.as_online().at_expected();
|
|
let inner_ext = self.load_remote_and_maybe_save().await?;
|
|
Ok(RemoteExternalities { block_hash, inner_ext })
|
|
}
|
|
|
|
fn do_load_offline(
|
|
&mut self,
|
|
config: OfflineConfig,
|
|
) -> Result<RemoteExternalities<B>, &'static str> {
|
|
let Snapshot { block_hash, top, child, state_version } =
|
|
self.load_snapshot(config.state_snapshot.path.clone())?;
|
|
|
|
let mut inner_ext = TestExternalities::new_with_code_and_state(
|
|
Default::default(),
|
|
Default::default(),
|
|
self.overwrite_state_version.unwrap_or(state_version),
|
|
);
|
|
|
|
info!(target: LOG_TARGET, "injecting a total of {} top keys", top.len());
|
|
for (k, v) in top {
|
|
// skip writing the child root data.
|
|
if is_default_child_storage_key(k.as_ref()) {
|
|
continue
|
|
}
|
|
inner_ext.insert(k.0, v.0);
|
|
}
|
|
|
|
info!(
|
|
target: LOG_TARGET,
|
|
"injecting a total of {} child keys",
|
|
child.iter().flat_map(|(_, kv)| kv).count()
|
|
);
|
|
|
|
for (info, key_values) in child {
|
|
for (k, v) in key_values {
|
|
inner_ext.insert_child(info.clone(), k.0, v.0);
|
|
}
|
|
}
|
|
|
|
Ok(RemoteExternalities { inner_ext, block_hash })
|
|
}
|
|
|
|
pub(crate) async fn pre_build(mut self) -> Result<RemoteExternalities<B>, &'static str> {
|
|
let mut ext = match self.mode.clone() {
|
|
Mode::Offline(config) => self.do_load_offline(config)?,
|
|
Mode::Online(_) => self.do_load_remote().await?,
|
|
Mode::OfflineOrElseOnline(offline_config, _) => {
|
|
match self.do_load_offline(offline_config) {
|
|
Ok(x) => x,
|
|
Err(_) => self.do_load_remote().await?,
|
|
}
|
|
},
|
|
};
|
|
|
|
// inject manual key values.
|
|
if !self.hashed_key_values.is_empty() {
|
|
log::info!(
|
|
target: LOG_TARGET,
|
|
"extending externalities with {} manually injected key-values",
|
|
self.hashed_key_values.len()
|
|
);
|
|
for (k, v) in self.hashed_key_values {
|
|
ext.insert(k.0, v.0);
|
|
}
|
|
}
|
|
|
|
// exclude manual key values.
|
|
if !self.hashed_blacklist.is_empty() {
|
|
log::info!(
|
|
target: LOG_TARGET,
|
|
"excluding externalities from {} keys",
|
|
self.hashed_blacklist.len()
|
|
);
|
|
for k in self.hashed_blacklist {
|
|
ext.execute_with(|| sp_io::storage::clear(&k));
|
|
}
|
|
}
|
|
|
|
Ok(ext)
|
|
}
|
|
}
|
|
|
|
// Public methods
|
|
impl<B: BlockT + DeserializeOwned> Builder<B>
|
|
where
|
|
B::Hash: DeserializeOwned,
|
|
B::Header: DeserializeOwned,
|
|
{
|
|
/// Create a new builder.
|
|
pub fn new() -> Self {
|
|
Default::default()
|
|
}
|
|
|
|
/// Inject a manual list of key and values to the storage.
|
|
pub fn inject_hashed_key_value(mut self, injections: Vec<KeyValue>) -> Self {
|
|
for i in injections {
|
|
self.hashed_key_values.push(i.clone());
|
|
}
|
|
self
|
|
}
|
|
|
|
/// Blacklist this hashed key from the final externalities. This is treated as-is, and should be
|
|
/// pre-hashed.
|
|
pub fn blacklist_hashed_key(mut self, hashed: &[u8]) -> Self {
|
|
self.hashed_blacklist.push(hashed.to_vec());
|
|
self
|
|
}
|
|
|
|
/// Configure a state snapshot to be used.
|
|
pub fn mode(mut self, mode: Mode<B>) -> Self {
|
|
self.mode = mode;
|
|
self
|
|
}
|
|
|
|
/// The state version to use.
|
|
pub fn overwrite_state_version(mut self, version: StateVersion) -> Self {
|
|
self.overwrite_state_version = Some(version);
|
|
self
|
|
}
|
|
|
|
pub async fn build(self) -> Result<RemoteExternalities<B>, &'static str> {
|
|
let mut ext = self.pre_build().await?;
|
|
ext.commit_all().unwrap();
|
|
|
|
info!(
|
|
target: LOG_TARGET,
|
|
"initialized state externalities with storage root {:?} and state_version {:?}",
|
|
ext.as_backend().root(),
|
|
ext.state_version
|
|
);
|
|
|
|
Ok(ext)
|
|
}
|
|
}
|
|
|
|
#[cfg(test)]
mod test_prelude {
	use tracing_subscriber::EnvFilter;

	pub(crate) use super::*;
	pub(crate) use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash};
	pub(crate) type Block = RawBlock<ExtrinsicWrapper<Hash>>;

	/// Install a `tracing` subscriber driven by the default env filter; silently a no-op if
	/// one is already registered.
	pub(crate) fn init_logger() {
		let subscriber = tracing_subscriber::fmt()
			.with_env_filter(EnvFilter::from_default_env())
			.with_level(true);
		let _ = subscriber.try_init();
	}
}
|
|
|
|
#[cfg(test)]
mod tests {
	use super::test_prelude::*;

	/// A builder configured to read the bundled `proxy_test` snapshot file.
	fn offline_proxy_builder() -> Builder<Block> {
		Builder::<Block>::new().mode(Mode::Offline(OfflineConfig {
			state_snapshot: SnapshotConfig::new("test_data/proxy_test"),
		}))
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn can_load_state_snapshot() {
		init_logger();
		offline_proxy_builder().build().await.unwrap().execute_with(|| {});
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn can_exclude_from_snapshot() {
		init_logger();

		// get the first key from the snapshot file.
		let some_key = offline_proxy_builder()
			.build()
			.await
			.expect("Can't read state snapshot file")
			.execute_with(|| {
				let key =
					sp_io::storage::next_key(&[]).expect("some key must exist in the snapshot");
				assert!(sp_io::storage::get(&key).is_some());
				key
			});

		// rebuilding with that key blacklisted must leave it absent.
		offline_proxy_builder()
			.blacklist_hashed_key(&some_key)
			.build()
			.await
			.expect("Can't read state snapshot file")
			.execute_with(|| assert!(sp_io::storage::get(&some_key).is_none()));
	}
}
|
|
|
|
#[cfg(all(test, feature = "remote-test"))]
mod remote_tests {
	use super::test_prelude::*;
	use std::os::unix::fs::MetadataExt;

	/// Return the entries of the current directory whose file name is exactly `name`.
	///
	/// The tests below create snapshot files in the working directory; this locates them so
	/// they can be inspected and deleted. Extracted because the same scan was duplicated in
	/// four tests (and the original carried a redundant `.into_iter()` on `ReadDir`).
	fn find_cache_files(name: &str) -> Vec<std::fs::DirEntry> {
		std::fs::read_dir(Path::new("."))
			.unwrap()
			.map(|d| d.unwrap())
			.filter(|p| p.path().file_name().unwrap_or_default() == name)
			.collect()
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn state_version_is_kept_and_can_be_altered() {
		const CACHE: &'static str = "state_version_is_kept_and_can_be_altered";
		init_logger();

		// first, build a snapshot.
		let ext = Builder::<Block>::new()
			.mode(Mode::Online(OnlineConfig {
				pallets: vec!["Proxy".to_owned()],
				child_trie: false,
				state_snapshot: Some(SnapshotConfig::new(CACHE)),
				..Default::default()
			}))
			.build()
			.await
			.unwrap();

		// now re-create the same snapshot.
		let cached_ext = Builder::<Block>::new()
			.mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) }))
			.build()
			.await
			.unwrap();

		assert_eq!(ext.state_version, cached_ext.state_version);

		// now overwrite it
		let other = match ext.state_version {
			StateVersion::V0 => StateVersion::V1,
			StateVersion::V1 => StateVersion::V0,
		};
		let cached_ext = Builder::<Block>::new()
			.mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) }))
			.overwrite_state_version(other)
			.build()
			.await
			.unwrap();

		assert_eq!(cached_ext.state_version, other);
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn snapshot_block_hash_works() {
		const CACHE: &'static str = "snapshot_block_hash_works";
		init_logger();

		// first, build a snapshot.
		let ext = Builder::<Block>::new()
			.mode(Mode::Online(OnlineConfig {
				pallets: vec!["Proxy".to_owned()],
				child_trie: false,
				state_snapshot: Some(SnapshotConfig::new(CACHE)),
				..Default::default()
			}))
			.build()
			.await
			.unwrap();

		// now re-create the same snapshot.
		let cached_ext = Builder::<Block>::new()
			.mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) }))
			.build()
			.await
			.unwrap();

		assert_eq!(ext.block_hash, cached_ext.block_hash);
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn offline_else_online_works() {
		const CACHE: &'static str = "offline_else_online_works_data";
		init_logger();
		// this shows that in the second run, we use the remote and create a snapshot.
		Builder::<Block>::new()
			.mode(Mode::OfflineOrElseOnline(
				OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) },
				OnlineConfig {
					pallets: vec!["Proxy".to_owned()],
					child_trie: false,
					state_snapshot: Some(SnapshotConfig::new(CACHE)),
					..Default::default()
				},
			))
			.build()
			.await
			.unwrap()
			.execute_with(|| {});

		// this shows that in the second run, we are not using the remote
		Builder::<Block>::new()
			.mode(Mode::OfflineOrElseOnline(
				OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) },
				OnlineConfig {
					transport: "ws://non-existent:666".to_owned().into(),
					..Default::default()
				},
			))
			.build()
			.await
			.unwrap()
			.execute_with(|| {});

		let to_delete = find_cache_files(CACHE);
		assert!(to_delete.len() == 1);
		std::fs::remove_file(to_delete[0].path()).unwrap();
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn can_build_one_small_pallet() {
		init_logger();
		Builder::<Block>::new()
			.mode(Mode::Online(OnlineConfig {
				pallets: vec!["Proxy".to_owned()],
				child_trie: false,
				..Default::default()
			}))
			.build()
			.await
			.unwrap()
			.execute_with(|| {});
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn can_build_few_pallet() {
		init_logger();
		Builder::<Block>::new()
			.mode(Mode::Online(OnlineConfig {
				pallets: vec!["Proxy".to_owned(), "Multisig".to_owned()],
				child_trie: false,
				..Default::default()
			}))
			.build()
			.await
			.unwrap()
			.execute_with(|| {});
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn can_create_snapshot() {
		const CACHE: &'static str = "can_create_snapshot";
		init_logger();

		Builder::<Block>::new()
			.mode(Mode::Online(OnlineConfig {
				state_snapshot: Some(SnapshotConfig::new(CACHE)),
				pallets: vec!["Proxy".to_owned()],
				child_trie: false,
				..Default::default()
			}))
			.build()
			.await
			.unwrap()
			.execute_with(|| {});

		let to_delete = find_cache_files(CACHE);

		// a top-only snapshot: has top keys but no child keys.
		let snap: Snapshot<Block> = Builder::<Block>::new().load_snapshot(CACHE.into()).unwrap();
		assert!(matches!(snap, Snapshot { top, child, .. } if top.len() > 0 && child.len() == 0));

		assert!(to_delete.len() == 1);
		let to_delete = to_delete.first().unwrap();
		assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1);
		std::fs::remove_file(to_delete.path()).unwrap();
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn can_create_child_snapshot() {
		const CACHE: &'static str = "can_create_child_snapshot";
		init_logger();
		Builder::<Block>::new()
			.mode(Mode::Online(OnlineConfig {
				state_snapshot: Some(SnapshotConfig::new(CACHE)),
				pallets: vec!["Crowdloan".to_owned()],
				child_trie: true,
				..Default::default()
			}))
			.build()
			.await
			.unwrap()
			.execute_with(|| {});

		let to_delete = find_cache_files(CACHE);

		// with `child_trie: true` the snapshot must contain child data as well.
		let snap: Snapshot<Block> = Builder::<Block>::new().load_snapshot(CACHE.into()).unwrap();
		assert!(matches!(snap, Snapshot { top, child, .. } if top.len() > 0 && child.len() > 0));

		assert!(to_delete.len() == 1);
		let to_delete = to_delete.first().unwrap();
		assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1);
		std::fs::remove_file(to_delete.path()).unwrap();
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn can_build_big_pallet() {
		// only runs when a remote endpoint is provided via the TEST_WS env var.
		if std::option_env!("TEST_WS").is_none() {
			return
		}
		init_logger();
		Builder::<Block>::new()
			.mode(Mode::Online(OnlineConfig {
				transport: std::option_env!("TEST_WS").unwrap().to_owned().into(),
				pallets: vec!["Staking".to_owned()],
				child_trie: false,
				..Default::default()
			}))
			.build()
			.await
			.unwrap()
			.execute_with(|| {});
	}

	#[tokio::test(flavor = "multi_thread")]
	async fn can_fetch_all() {
		// only runs when a remote endpoint is provided via the TEST_WS env var.
		if std::option_env!("TEST_WS").is_none() {
			return
		}
		init_logger();
		Builder::<Block>::new()
			.mode(Mode::Online(OnlineConfig {
				transport: std::option_env!("TEST_WS").unwrap().to_owned().into(),
				..Default::default()
			}))
			.build()
			.await
			.unwrap()
			.execute_with(|| {});
	}
}
|