introduce errors with info (#1834)

This commit is contained in:
Bernhard Schuster
2020-10-27 08:10:03 +01:00
committed by GitHub
parent 40ea09389c
commit f345123748
58 changed files with 1983 additions and 2030 deletions
+58 -290
View File
@@ -224,32 +224,18 @@ dependencies = [
"futures-core",
]
[[package]]
name = "async-executor"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90f47c78ea98277cb1f5e6f60ba4fc762f5eafe9f6511bc2f7dfd8b75c225650"
dependencies = [
"async-io 0.1.5",
"futures-lite 0.1.10",
"multitask",
"parking 1.0.5",
"scoped-tls",
"waker-fn",
]
[[package]]
name = "async-executor"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d373d78ded7d0b3fa8039375718cde0aace493f2e34fb60f51cbf567562ca801"
dependencies = [
"async-task 4.0.3",
"async-task",
"concurrent-queue",
"fastrand",
"futures-lite 1.11.2",
"futures-lite",
"once_cell 1.4.1",
"vec-arena 1.0.0",
"vec-arena",
]
[[package]]
@@ -258,31 +244,13 @@ version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "124ac8c265e407641c3362b8f4d39cdb4e243885b71eef087be27199790f5a3a"
dependencies = [
"async-executor 1.3.0",
"async-io 1.1.0",
"futures-lite 1.11.2",
"async-executor",
"async-io",
"futures-lite",
"num_cpus",
"once_cell 1.4.1",
]
[[package]]
name = "async-io"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca8126ef9fb99355c6fd27575d691be4887b884137a5b6f48c2d961f13590c51"
dependencies = [
"cfg-if",
"concurrent-queue",
"futures-lite 0.1.10",
"libc",
"once_cell 1.4.1",
"parking 1.0.5",
"socket2",
"vec-arena 0.5.0",
"wepoll-sys-stjepang",
"winapi 0.3.9",
]
[[package]]
name = "async-io"
version = "1.1.0"
@@ -292,14 +260,14 @@ dependencies = [
"cfg-if",
"concurrent-queue",
"fastrand",
"futures-lite 1.11.2",
"futures-lite",
"libc",
"log 0.4.11",
"once_cell 1.4.1",
"parking 2.0.0",
"parking",
"polling",
"socket2",
"vec-arena 1.0.0",
"vec-arena",
"waker-fn",
"wepoll-sys-stjepang",
"winapi 0.3.9",
@@ -321,14 +289,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9fa76751505e8df1c7a77762f60486f60c71bbd9b8557f4da6ad47d083732ed"
dependencies = [
"async-global-executor",
"async-io 1.1.0",
"async-io",
"async-mutex",
"blocking 1.0.2",
"blocking",
"crossbeam-utils",
"futures-channel",
"futures-core",
"futures-io",
"futures-lite 1.11.2",
"futures-lite",
"gloo-timers",
"kv-log-macro",
"log 0.4.11",
@@ -341,12 +309,6 @@ dependencies = [
"wasm-bindgen-futures",
]
[[package]]
name = "async-task"
version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c17772156ef2829aadc587461c7753af20b7e8db1529bc66855add962a3b35d3"
[[package]]
name = "async-task"
version = "4.0.3"
@@ -598,33 +560,6 @@ dependencies = [
"byte-tools",
]
[[package]]
name = "blocking"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2468ff7bf85066b4a3678fede6fe66db31846d753ff0adfbfab2c6a6e81612b"
dependencies = [
"async-channel",
"atomic-waker",
"futures-lite 0.1.10",
"once_cell 1.4.1",
"parking 1.0.5",
"waker-fn",
]
[[package]]
name = "blocking"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76e94bf99b692f54c9d05f97454d3faf11134523fe5b180564a3fb6ed63bcc0a"
dependencies = [
"async-channel",
"atomic-waker",
"futures-lite 0.1.10",
"once_cell 1.4.1",
"waker-fn",
]
[[package]]
name = "blocking"
version = "1.0.2"
@@ -632,10 +567,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9"
dependencies = [
"async-channel",
"async-task 4.0.3",
"async-task",
"atomic-waker",
"fastrand",
"futures-lite 1.11.2",
"futures-lite",
"once_cell 1.4.1",
]
@@ -1107,32 +1042,6 @@ version = "2.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72aa14c04dfae8dd7d8a2b1cb7ca2152618cd01336dbfe704b8dcbf8d41dbd69"
[[package]]
name = "derive_more"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d944ac6003ed268757ef1ee686753b57efc5fcf0ebe7b64c9fc81e7e32ff839"
dependencies = [
"proc-macro2 0.4.30",
"quote 0.6.13",
"rustc_version",
"syn 0.15.44",
]
[[package]]
name = "derive_more"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a141330240c921ec6d074a3e188a7c7ef95668bb95e7d44fa0e5778ec2a7afe"
dependencies = [
"lazy_static",
"proc-macro2 0.4.30",
"quote 0.6.13",
"regex",
"rustc_version",
"syn 0.15.44",
]
[[package]]
name = "derive_more"
version = "0.99.11"
@@ -1241,12 +1150,6 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c53dc3a653e0f64081026e4bf048d48fec9fce90c66e8326ca7292df0ff2d82"
[[package]]
name = "easy-parallel"
version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dd4afd79212583ff429b913ad6605242ed7eec277e950b1438f300748f948f4"
[[package]]
name = "ed25519"
version = "1.0.1"
@@ -1448,15 +1351,16 @@ dependencies = [
[[package]]
name = "femme"
version = "2.1.0"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b6b21baebbed15551f2170010ca4101b9ed3fdc05822791c8bd4631840eab81"
checksum = "2af1a24f391a5a94d756db5092c6576aad494b88a71a5a36b20c67b63e0df034"
dependencies = [
"cfg-if",
"js-sys",
"log 0.4.11",
"serde",
"serde_derive",
"serde_json",
"wasm-bindgen",
"web-sys",
]
@@ -1829,21 +1733,6 @@ version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789"
[[package]]
name = "futures-lite"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbe71459749b2e8e66fb95df721b22fa08661ad384a0c5b519e11d3893b4692a"
dependencies = [
"fastrand",
"futures-core",
"futures-io",
"memchr",
"parking 1.0.5",
"pin-project-lite",
"waker-fn",
]
[[package]]
name = "futures-lite"
version = "1.11.2"
@@ -1854,7 +1743,7 @@ dependencies = [
"futures-core",
"futures-io",
"memchr",
"parking 2.0.0",
"parking",
"pin-project-lite",
"waker-fn",
]
@@ -3641,17 +3530,6 @@ dependencies = [
"unsigned-varint 0.4.0",
]
[[package]]
name = "multitask"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c09c35271e7dcdb5f709779111f2c8e8ab8e06c1b587c1c6a9e179d865aaa5b4"
dependencies = [
"async-task 3.0.0",
"concurrent-queue",
"fastrand",
]
[[package]]
name = "nalgebra"
version = "0.18.1"
@@ -4544,12 +4422,6 @@ dependencies = [
"url 2.1.1",
]
[[package]]
name = "parking"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50d4a6da31f8144a32532fe38fe8fb439a6842e0ec633f0037f0144c14e7f907"
[[package]]
name = "parking"
version = "2.0.0"
@@ -4784,25 +4656,18 @@ dependencies = [
"bitvec",
"env_logger 0.7.1",
"futures 0.3.5",
"futures-timer 3.0.2",
"log 0.4.11",
"maplit",
"parity-scale-codec",
"parking_lot 0.11.0",
"polkadot-network-bridge",
"polkadot-node-network-protocol",
"polkadot-node-primitives",
"polkadot-node-subsystem",
"polkadot-node-subsystem-test-helpers",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sc-keystore",
"sc-network",
"smol 0.3.3",
"sp-application-crypto",
"sp-core",
"sp-keystore",
"streamunordered",
"tempfile",
]
@@ -4811,16 +4676,12 @@ name = "polkadot-availability-distribution"
version = "0.1.0"
dependencies = [
"assert_matches",
"bitvec",
"derive_more 0.99.11",
"env_logger 0.7.1",
"futures 0.3.5",
"futures-timer 3.0.2",
"log 0.4.11",
"parity-scale-codec",
"parking_lot 0.11.0",
"polkadot-erasure-coding",
"polkadot-network-bridge",
"polkadot-node-network-protocol",
"polkadot-node-subsystem",
"polkadot-node-subsystem-test-helpers",
@@ -4832,7 +4693,7 @@ dependencies = [
"sp-core",
"sp-keyring",
"sp-keystore",
"streamunordered",
"thiserror",
]
[[package]]
@@ -4840,23 +4701,16 @@ name = "polkadot-cli"
version = "0.8.25"
dependencies = [
"frame-benchmarking-cli",
"futures 0.3.5",
"log 0.4.11",
"polkadot-service",
"sc-cli",
"sc-client-api",
"sc-client-db",
"sc-executor",
"sc-service",
"sc-tracing",
"sp-api",
"sp-core",
"sp-runtime",
"sp-trie",
"structopt",
"substrate-browser-utils",
"substrate-build-script-utils",
"tokio 0.2.21",
"wasm-bindgen",
"wasm-bindgen-futures",
]
@@ -4866,22 +4720,19 @@ name = "polkadot-collator-protocol"
version = "0.1.0"
dependencies = [
"assert_matches",
"derive_more 0.99.11",
"env_logger 0.7.1",
"futures 0.3.5",
"futures-timer 3.0.2",
"log 0.4.11",
"parity-scale-codec",
"polkadot-network-bridge",
"polkadot-node-network-protocol",
"polkadot-node-subsystem",
"polkadot-node-subsystem-test-helpers",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"smallvec 1.4.2",
"smol-timeout",
"sp-core",
"sp-keyring",
"thiserror",
]
[[package]]
@@ -4898,12 +4749,12 @@ dependencies = [
name = "polkadot-erasure-coding"
version = "0.8.25"
dependencies = [
"derive_more 0.15.0",
"parity-scale-codec",
"polkadot-primitives",
"reed-solomon-erasure",
"sp-core",
"sp-trie",
"thiserror",
]
[[package]]
@@ -4913,7 +4764,6 @@ dependencies = [
"assert_matches",
"async-trait",
"futures 0.3.5",
"futures-timer 3.0.2",
"log 0.4.11",
"parity-scale-codec",
"parking_lot 0.10.2",
@@ -4926,14 +4776,12 @@ dependencies = [
"sp-core",
"sp-keyring",
"sp-runtime",
"streamunordered",
]
[[package]]
name = "polkadot-node-collation-generation"
version = "0.1.0"
dependencies = [
"derive_more 0.99.11",
"futures 0.3.5",
"log 0.4.11",
"polkadot-erasure-coding",
@@ -4943,6 +4791,7 @@ dependencies = [
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sp-core",
"thiserror",
]
[[package]]
@@ -4950,7 +4799,6 @@ name = "polkadot-node-core-av-store"
version = "0.1.0"
dependencies = [
"assert_matches",
"derive_more 0.99.11",
"env_logger 0.7.1",
"futures 0.3.5",
"futures-timer 3.0.2",
@@ -4967,6 +4815,7 @@ dependencies = [
"polkadot-primitives",
"smallvec 1.4.2",
"sp-core",
"thiserror",
]
[[package]]
@@ -4975,7 +4824,6 @@ version = "0.1.0"
dependencies = [
"assert_matches",
"bitvec",
"derive_more 0.99.11",
"futures 0.3.5",
"log 0.4.11",
"polkadot-erasure-coding",
@@ -4985,14 +4833,12 @@ dependencies = [
"polkadot-node-subsystem-util",
"polkadot-primitives",
"polkadot-statement-table",
"sc-client-api",
"sc-keystore",
"sp-api",
"sp-application-crypto",
"sp-blockchain",
"sp-core",
"sp-keyring",
"sp-keystore",
"thiserror",
]
[[package]]
@@ -5000,13 +4846,13 @@ name = "polkadot-node-core-bitfield-signing"
version = "0.1.0"
dependencies = [
"bitvec",
"derive_more 0.99.11",
"futures 0.3.5",
"log 0.4.11",
"polkadot-node-subsystem",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sp-keystore",
"thiserror",
"wasm-timer",
]
@@ -5014,7 +4860,6 @@ dependencies = [
name = "polkadot-node-core-candidate-selection"
version = "0.1.0"
dependencies = [
"derive_more 0.99.11",
"futures 0.3.5",
"log 0.4.11",
"polkadot-node-primitives",
@@ -5022,6 +4867,7 @@ dependencies = [
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sp-core",
"thiserror",
]
[[package]]
@@ -5029,7 +4875,6 @@ name = "polkadot-node-core-candidate-validation"
version = "0.1.0"
dependencies = [
"assert_matches",
"derive_more 0.99.11",
"futures 0.3.5",
"log 0.4.11",
"parity-scale-codec",
@@ -5039,7 +4884,6 @@ dependencies = [
"polkadot-node-subsystem-util",
"polkadot-parachain",
"polkadot-primitives",
"sp-blockchain",
"sp-core",
"sp-keyring",
]
@@ -5063,16 +4907,13 @@ name = "polkadot-node-core-proposer"
version = "0.1.0"
dependencies = [
"futures 0.3.5",
"futures-timer 3.0.2",
"log 0.4.11",
"parity-scale-codec",
"polkadot-node-subsystem",
"polkadot-overseer",
"polkadot-primitives",
"sc-basic-authorship",
"sc-block-builder",
"sc-client-api",
"sc-telemetry",
"sp-api",
"sp-blockchain",
"sp-consensus",
@@ -5080,7 +4921,6 @@ dependencies = [
"sp-inherents",
"sp-runtime",
"sp-transaction-pool",
"tokio-executor 0.2.0-alpha.6",
"wasm-timer",
]
@@ -5089,19 +4929,17 @@ name = "polkadot-node-core-provisioner"
version = "0.1.0"
dependencies = [
"bitvec",
"derive_more 0.99.11",
"futures 0.3.5",
"futures-timer 3.0.2",
"lazy_static",
"log 0.4.11",
"polkadot-node-subsystem",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sc-keystore",
"sp-application-crypto",
"sp-core",
"sp-keystore",
"tempfile",
"thiserror",
]
[[package]]
@@ -5109,13 +4947,11 @@ name = "polkadot-node-core-runtime-api"
version = "0.1.0"
dependencies = [
"futures 0.3.5",
"polkadot-node-primitives",
"polkadot-node-subsystem",
"polkadot-node-subsystem-test-helpers",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sp-api",
"sp-blockchain",
"sp-core",
]
@@ -5127,8 +4963,6 @@ dependencies = [
"polkadot-node-primitives",
"polkadot-primitives",
"sc-network",
"sp-core",
"sp-runtime",
]
[[package]]
@@ -5149,7 +4983,7 @@ version = "0.1.0"
dependencies = [
"assert_matches",
"async-trait",
"derive_more 0.99.11",
"derive_more",
"futures 0.3.5",
"futures-timer 3.0.2",
"log 0.4.11",
@@ -5164,6 +4998,7 @@ dependencies = [
"sc-network",
"smallvec 1.4.2",
"sp-core",
"thiserror",
]
[[package]]
@@ -5171,7 +5006,6 @@ name = "polkadot-node-subsystem-test-helpers"
version = "0.1.0"
dependencies = [
"async-trait",
"derive_more 0.99.11",
"futures 0.3.5",
"futures-timer 3.0.2",
"log 0.4.11",
@@ -5195,7 +5029,6 @@ version = "0.1.0"
dependencies = [
"assert_matches",
"async-trait",
"derive_more 0.99.11",
"env_logger 0.7.1",
"futures 0.3.5",
"futures-timer 3.0.2",
@@ -5207,14 +5040,13 @@ dependencies = [
"polkadot-node-subsystem",
"polkadot-node-subsystem-test-helpers",
"polkadot-primitives",
"polkadot-statement-table",
"sc-network",
"smallvec 1.4.2",
"sp-application-crypto",
"sp-core",
"sp-keystore",
"streamunordered",
"substrate-prometheus-endpoint",
"thiserror",
]
[[package]]
@@ -5241,7 +5073,7 @@ dependencies = [
name = "polkadot-parachain"
version = "0.8.25"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"futures 0.3.5",
"log 0.4.11",
"parity-scale-codec",
@@ -5264,19 +5096,13 @@ version = "0.1.0"
dependencies = [
"assert_matches",
"futures 0.3.5",
"futures-timer 3.0.2",
"log 0.4.11",
"parity-scale-codec",
"parking_lot 0.10.2",
"polkadot-node-network-protocol",
"polkadot-node-primitives",
"polkadot-node-subsystem",
"polkadot-node-subsystem-test-helpers",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sp-core",
"sp-runtime",
"streamunordered",
]
[[package]]
@@ -5570,11 +5396,8 @@ dependencies = [
"arrayvec 0.5.1",
"assert_matches",
"futures 0.3.5",
"futures-timer 3.0.2",
"indexmap",
"log 0.4.11",
"parity-scale-codec",
"parking_lot 0.10.2",
"polkadot-node-network-protocol",
"polkadot-node-primitives",
"polkadot-node-subsystem",
@@ -5586,9 +5409,7 @@ dependencies = [
"sp-core",
"sp-keyring",
"sp-keystore",
"sp-runtime",
"sp-staking",
"streamunordered",
]
[[package]]
@@ -5736,7 +5557,6 @@ dependencies = [
name = "polkadot-validation"
version = "0.8.25"
dependencies = [
"derive_more 0.14.1",
"futures 0.3.5",
"log 0.4.11",
"parity-scale-codec",
@@ -5758,6 +5578,7 @@ dependencies = [
"sp-transaction-pool",
"sp-trie",
"substrate-prometheus-endpoint",
"thiserror",
]
[[package]]
@@ -6590,7 +6411,7 @@ source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e2
dependencies = [
"async-trait",
"bytes 0.5.6",
"derive_more 0.99.11",
"derive_more",
"either",
"futures 0.3.5",
"futures-timer 3.0.2",
@@ -6695,7 +6516,7 @@ dependencies = [
"atty",
"bip39",
"chrono",
"derive_more 0.99.11",
"derive_more",
"fdlimit",
"futures 0.3.5",
"hex",
@@ -6756,7 +6577,7 @@ name = "sc-client-api"
version = "2.0.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"fnv",
"futures 0.3.5",
"hash-db",
@@ -6834,7 +6655,7 @@ name = "sc-consensus-babe"
version = "0.8.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"fork-tree",
"futures 0.3.5",
"futures-timer 3.0.2",
@@ -6879,7 +6700,7 @@ name = "sc-consensus-babe-rpc"
version = "0.8.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"futures 0.3.5",
"jsonrpc-core",
"jsonrpc-core-client",
@@ -6953,7 +6774,7 @@ name = "sc-executor"
version = "0.8.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"lazy_static",
"libsecp256k1",
"log 0.4.11",
@@ -6982,7 +6803,7 @@ name = "sc-executor-common"
version = "0.8.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"log 0.4.11",
"parity-scale-codec",
"parity-wasm",
@@ -7032,7 +6853,7 @@ name = "sc-finality-grandpa"
version = "0.8.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"finality-grandpa",
"fork-tree",
"futures 0.3.5",
@@ -7069,7 +6890,7 @@ name = "sc-finality-grandpa-rpc"
version = "0.8.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"finality-grandpa",
"futures 0.3.5",
"jsonrpc-core",
@@ -7112,7 +6933,7 @@ version = "2.0.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"async-trait",
"derive_more 0.99.11",
"derive_more",
"futures 0.3.5",
"futures-util",
"hex",
@@ -7155,7 +6976,7 @@ dependencies = [
"bitflags",
"bs58",
"bytes 0.5.6",
"derive_more 0.99.11",
"derive_more",
"either",
"erased-serde",
"fnv",
@@ -7301,7 +7122,7 @@ name = "sc-rpc-api"
version = "0.8.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"futures 0.3.5",
"jsonrpc-core",
"jsonrpc-core-client",
@@ -7343,7 +7164,7 @@ name = "sc-service"
version = "0.8.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"directories",
"exit-future",
"futures 0.1.29",
@@ -7480,7 +7301,7 @@ name = "sc-transaction-graph"
version = "2.0.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"futures 0.3.5",
"linked-hash-map",
"log 0.4.11",
@@ -7501,7 +7322,7 @@ name = "sc-transaction-pool"
version = "2.0.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"futures 0.3.5",
"futures-diagnose",
"intervalier",
@@ -7870,53 +7691,6 @@ version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252"
[[package]]
name = "smol"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "620cbb3c6e34da57d3a248cda0cd01cd5848164dc062e764e65d06fe3ea7aed5"
dependencies = [
"async-task 3.0.0",
"blocking 0.4.7",
"concurrent-queue",
"fastrand",
"futures-io",
"futures-util",
"libc",
"once_cell 1.4.1",
"scoped-tls",
"slab",
"socket2",
"wepoll-sys-stjepang",
"winapi 0.3.9",
]
[[package]]
name = "smol"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67583f4ccc13bbb105a0752058d8ad66c47753d85445952809bcaca891954f83"
dependencies = [
"async-channel",
"async-executor 0.1.2",
"async-io 0.1.5",
"blocking 0.5.0",
"cfg-if",
"easy-parallel",
"futures-lite 0.1.10",
"num_cpus",
]
[[package]]
name = "smol-timeout"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "024818c1f00b80e8171ddcfcee33860134293aa3aced60c9cbd7a5a2d41db392"
dependencies = [
"pin-project",
"smol 0.1.18",
]
[[package]]
name = "snow"
version = "0.7.1"
@@ -7968,7 +7742,7 @@ name = "sp-allocator"
version = "2.0.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"log 0.4.11",
"sp-core",
"sp-std",
@@ -8067,7 +7841,7 @@ name = "sp-blockchain"
version = "2.0.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"log 0.4.11",
"lru 0.4.3",
"parity-scale-codec",
@@ -8093,7 +7867,7 @@ name = "sp-consensus"
version = "0.8.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"futures 0.3.5",
"futures-timer 3.0.2",
"libp2p",
@@ -8250,7 +8024,7 @@ name = "sp-inherents"
version = "2.0.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"parity-scale-codec",
"parking_lot 0.10.2",
"sp-core",
@@ -8298,7 +8072,7 @@ version = "0.8.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"async-trait",
"derive_more 0.99.11",
"derive_more",
"futures 0.3.5",
"merlin",
"parity-scale-codec",
@@ -8525,7 +8299,7 @@ name = "sp-transaction-pool"
version = "2.0.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"derive_more 0.99.11",
"derive_more",
"futures 0.3.5",
"log 0.4.11",
"parity-scale-codec",
@@ -8777,7 +8551,7 @@ version = "0.8.0"
source = "git+https://github.com/paritytech/substrate#d766e229466d63afadd19097e277d85146fee3c9"
dependencies = [
"async-std",
"derive_more 0.99.11",
"derive_more",
"futures-util",
"hyper 0.13.6",
"log 0.4.11",
@@ -8984,18 +8758,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.20"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08"
checksum = "318234ffa22e0920fe9a40d7b8369b5f649d490980cf7aadcf1eb91594869b42"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.20"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793"
checksum = "cae2447b6282786c3493999f40a9be2a6ad20cb8bd268b0a0dbf5a065535c0ab"
dependencies = [
"proc-macro2 1.0.18",
"quote 1.0.7",
@@ -9651,12 +9425,6 @@ version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c"
[[package]]
name = "vec-arena"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17dfb54bf57c9043f4616cb03dab30eff012cc26631b797d8354b916708db919"
[[package]]
name = "vec-arena"
version = "1.0.0"
+3 -10
View File
@@ -14,19 +14,12 @@ wasm-opt = false
crate-type = ["cdylib", "rlib"]
[dependencies]
log = "0.4.8"
futures = { version = "0.3.4", features = ["compat"] }
structopt = "0.3.8"
sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
log = "0.4.11"
structopt = { version = "0.3.8", optional = true }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-client-db = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" }
service = { package = "polkadot-service", path = "../node/service", default-features = false, optional = true }
tokio = { version = "0.2.13", features = ["rt-threaded"], optional = true }
frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
@@ -46,7 +39,7 @@ default = [ "wasmtime", "db", "cli", "full-node", "trie-memory-tracker" ]
wasmtime = [ "sc-cli/wasmtime" ]
db = [ "service/db" ]
cli = [
"tokio",
"structopt",
"sc-cli",
"sc-service",
"frame-benchmarking-cli",
-1
View File
@@ -17,7 +17,6 @@
//! Polkadot CLI library.
#![warn(missing_docs)]
#![warn(unused_extern_crates)]
#[cfg(feature = "browser")]
mod browser;
+1 -1
View File
@@ -10,4 +10,4 @@ reed_solomon = { package = "reed-solomon-erasure", version = "4.0.2"}
codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "master" }
derive_more = "0.15.0"
thiserror = "1.0.21"
+15 -6
View File
@@ -30,6 +30,7 @@ use primitives::v0::{self, Hash as H256, BlakeTwo256, HashT};
use primitives::v1;
use sp_core::Blake2Hasher;
use trie::{EMPTY_PREFIX, MemoryDB, Trie, TrieMut, trie_types::{TrieDBMut, TrieDB}};
use thiserror::Error;
use self::wrapped_shard::WrappedShard;
@@ -39,35 +40,43 @@ mod wrapped_shard;
const MAX_VALIDATORS: usize = <galois_16::Field as reed_solomon::Field>::ORDER;
/// Errors in erasure coding.
#[derive(Debug, Clone, PartialEq, derive_more::Display)]
#[derive(Debug, Clone, PartialEq, Error)]
pub enum Error {
/// Returned when there are too many validators.
#[error("There are too many validators")]
TooManyValidators,
/// Cannot encode something for no validators
#[error("Validator set is empty")]
EmptyValidators,
/// Cannot reconstruct: wrong number of validators.
#[error("Validator count mismatches between encoding and decoding")]
WrongValidatorCount,
/// Not enough chunks present.
#[error("Not enough chunks to reconstruct message")]
NotEnoughChunks,
/// Too many chunks present.
#[error("Too many chunks present")]
TooManyChunks,
/// Chunks not of uniform length or the chunks are empty.
#[error("Chunks are not unform, mismatch in length or are zero sized")]
NonUniformChunks,
/// An uneven byte-length of a shard is not valid for GF(2^16) encoding.
#[error("Uneven length is not valid for field GF(2^16)")]
UnevenLength,
/// Chunk index out of bounds.
#[display(fmt = "Chunk is out of bounds: {} {}", _0, _1)]
ChunkIndexOutOfBounds(usize, usize),
#[error("Chunk is out of bounds: {chunk_index} not included in 0..{n_validators}")]
ChunkIndexOutOfBounds{ chunk_index: usize, n_validators: usize },
/// Bad payload in reconstructed bytes.
#[error("Reconstructed payload invalid")]
BadPayload,
/// Invalid branch proof.
#[error("Invalid branch proof")]
InvalidBranchProof,
/// Branch out of bounds.
#[error("Branch is out of bounds")]
BranchOutOfBounds,
}
impl std::error::Error for Error { }
#[derive(Debug, PartialEq)]
struct CodeParams {
data_shards: usize,
@@ -206,7 +215,7 @@ fn reconstruct<'a, I: 'a, T: Decode>(n_validators: usize, chunks: I) -> Result<T
let mut shard_len = None;
for (chunk_data, chunk_idx) in chunks.into_iter().take(n_validators) {
if chunk_idx >= n_validators {
return Err(Error::ChunkIndexOutOfBounds(chunk_idx, n_validators));
return Err(Error::ChunkIndexOutOfBounds{ chunk_index: chunk_idx, n_validators });
}
let shard_len = shard_len.get_or_insert_with(|| chunk_data.len());
@@ -5,7 +5,6 @@ authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
[dependencies]
derive_more = "0.99.9"
futures = "0.3.5"
log = "0.4.8"
polkadot-erasure-coding = { path = "../../erasure-coding" }
@@ -14,6 +13,7 @@ polkadot-node-subsystem = { path = "../subsystem" }
polkadot-node-subsystem-util = { path = "../subsystem-util" }
polkadot-primitives = { path = "../../primitives" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
thiserror = "1.0.21"
[dev-dependencies]
polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" }
+12 -11
View File
@@ -14,19 +14,20 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use thiserror::Error;
#[derive(Debug, derive_more::From)]
#[derive(Debug, Error)]
pub enum Error {
#[from]
Subsystem(polkadot_node_subsystem::SubsystemError),
#[from]
OneshotRecv(futures::channel::oneshot::Canceled),
#[from]
Runtime(polkadot_node_subsystem::errors::RuntimeApiError),
#[from]
Util(polkadot_node_subsystem_util::Error),
#[from]
Erasure(polkadot_erasure_coding::Error),
#[error(transparent)]
Subsystem(#[from] polkadot_node_subsystem::SubsystemError),
#[error(transparent)]
OneshotRecv(#[from] futures::channel::oneshot::Canceled),
#[error(transparent)]
Runtime(#[from] polkadot_node_subsystem::errors::RuntimeApiError),
#[error(transparent)]
Util(#[from] polkadot_node_subsystem_util::Error),
#[error(transparent)]
Erasure(#[from] polkadot_erasure_coding::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
+2 -2
View File
@@ -5,12 +5,12 @@ authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
[dependencies]
derive_more = "0.99.9"
futures = "0.3.5"
futures-timer = "3.0.2"
kvdb = "0.7.0"
kvdb-rocksdb = "0.9.1"
log = "0.4.8"
log = "0.4.11"
thiserror = "1.0.21"
codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] }
erasure = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
+16 -15
View File
@@ -44,6 +44,7 @@ use polkadot_node_subsystem_util::metrics::{self, prometheus};
use polkadot_subsystem::messages::{
AllMessages, AvailabilityStoreMessage, ChainApiMessage, RuntimeApiMessage, RuntimeApiRequest,
};
use thiserror::Error;
const LOG_TARGET: &str = "availability";
@@ -53,22 +54,22 @@ mod columns {
pub const NUM_COLUMNS: u32 = 2;
}
#[derive(Debug, derive_more::From)]
#[derive(Debug, Error)]
enum Error {
#[from]
Chain(ChainApiError),
#[from]
Erasure(erasure::Error),
#[from]
Io(io::Error),
#[from]
Oneshot(oneshot::Canceled),
#[from]
Runtime(RuntimeApiError),
#[from]
Subsystem(SubsystemError),
#[from]
Time(SystemTimeError),
#[error(transparent)]
RuntimeAPI(#[from] RuntimeApiError),
#[error(transparent)]
ChainAPI(#[from] ChainApiError),
#[error(transparent)]
Erasure(#[from] erasure::Error),
#[error(transparent)]
Io(#[from] io::Error),
#[error(transparent)]
Oneshot(#[from] oneshot::Canceled),
#[error(transparent)]
Subsystem(#[from] SubsystemError),
#[error(transparent)]
Time(#[from] SystemTimeError),
}
/// A wrapper type for delays.
+2 -5
View File
@@ -6,19 +6,16 @@ edition = "2018"
[dependencies]
futures = "0.3.5"
sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" }
derive_more = "0.99.9"
bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] }
log = "0.4.8"
log = "0.4.11"
thiserror = "1.0.21"
[dev-dependencies]
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+20 -14
View File
@@ -16,6 +16,8 @@
//! Implements a `CandidateBackingSubsystem`.
#![deny(unused_crate_dependencies)]
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::pin::Pin;
@@ -64,22 +66,26 @@ use statement_table::{
SignedStatement as TableSignedStatement, Summary as TableSummary,
},
};
use thiserror::Error;
#[derive(Debug, derive_more::From)]
#[derive(Debug, Error)]
enum Error {
#[error("Candidate is not found")]
CandidateNotFound,
#[error("Signature is invalid")]
InvalidSignature,
StoreFailed,
#[from]
Erasure(erasure_coding::Error),
#[from]
ValidationFailed(ValidationFailed),
#[from]
Oneshot(oneshot::Canceled),
#[from]
Mpsc(mpsc::SendError),
#[from]
UtilError(util::Error),
#[error("Failed to send candidates {0:?}")]
Send(Vec<NewBackedCandidate>),
#[error("Oneshot never resolved")]
Oneshot(#[from] #[source] oneshot::Canceled),
#[error("Obtaining erasure chunks failed")]
ObtainErasureChunks(#[from] #[source] erasure_coding::Error),
#[error(transparent)]
ValidationFailed(#[from] ValidationFailed),
#[error(transparent)]
Mpsc(#[from] mpsc::SendError),
#[error(transparent)]
UtilError(#[from] util::Error),
}
/// Holds all data needed for candidate backing job operation.
@@ -468,7 +474,7 @@ impl CandidateBackingJob {
CandidateBackingMessage::GetBackedCandidates(_, tx) => {
let backed = self.get_backed();
tx.send(backed).map_err(|_| oneshot::Canceled)?;
tx.send(backed).map_err(|data| Error::Send(data))?;
}
}
@@ -640,7 +646,7 @@ impl CandidateBackingJob {
)
).await?;
rx.await?.map_err(|_| Error::StoreFailed)?;
let _ = rx.await?;
Ok(())
}
@@ -6,11 +6,11 @@ edition = "2018"
[dependencies]
bitvec = "0.17.4"
derive_more = "0.99.9"
futures = "0.3.5"
log = "0.4.8"
log = "0.4.11"
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
wasm-timer = "0.2.4"
thiserror = "1.0.21"
+20 -14
View File
@@ -16,6 +16,9 @@
//! The bitfield signing subsystem produces `SignedAvailabilityBitfield`s once per block.
#![deny(unused_crate_dependencies, unused_results)]
#![warn(missing_docs)]
use bitvec::bitvec;
use futures::{
channel::{mpsc, oneshot},
@@ -37,6 +40,7 @@ use polkadot_node_subsystem_util::{
use polkadot_primitives::v1::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex};
use std::{convert::TryFrom, pin::Pin, time::Duration};
use wasm_timer::{Delay, Instant};
use thiserror::Error;
/// Delay between starting a bitfield signing job and its attempting to create a bitfield.
const JOB_DELAY: Duration = Duration::from_millis(1500);
@@ -45,6 +49,7 @@ const JOB_DELAY: Duration = Duration::from_millis(1500);
pub struct BitfieldSigningJob;
/// Messages which a `BitfieldSigningJob` is prepared to receive.
#[allow(missing_docs)]
pub enum ToJob {
BitfieldSigning(BitfieldSigningMessage),
Stop,
@@ -79,6 +84,7 @@ impl From<BitfieldSigningMessage> for ToJob {
}
/// Messages which may be sent from a `BitfieldSigningJob`.
#[allow(missing_docs)]
pub enum FromJob {
AvailabilityStore(AvailabilityStoreMessage),
BitfieldDistribution(BitfieldDistributionMessage),
@@ -112,28 +118,28 @@ impl TryFrom<AllMessages> for FromJob {
}
/// Errors we may encounter in the course of executing the `BitfieldSigningSubsystem`.
#[derive(Debug, derive_more::From)]
#[derive(Debug, Error)]
pub enum Error {
/// error propagated from the utility subsystem
#[from]
Util(util::Error),
#[error(transparent)]
Util(#[from] util::Error),
/// io error
#[from]
Io(std::io::Error),
#[error(transparent)]
Io(#[from] std::io::Error),
/// a one shot channel was canceled
#[from]
Oneshot(oneshot::Canceled),
#[error(transparent)]
Oneshot(#[from] oneshot::Canceled),
/// an mpsc channel failed to send
#[from]
MpscSend(mpsc::SendError),
#[error(transparent)]
MpscSend(#[from] mpsc::SendError),
/// several errors collected into one
#[from]
#[error("Multiple errors occurred: {0:?}")]
Multiple(Vec<Error>),
/// the runtime API failed to return what we wanted
#[from]
Runtime(RuntimeApiError),
#[error(transparent)]
Runtime(#[from] RuntimeApiError),
/// the keystore failed to process signing request
#[from]
#[error("Keystore failed: {0:?}")]
Keystore(KeystoreError),
}
@@ -252,7 +258,7 @@ async fn construct_availability_bitfield(
if errs.is_empty() {
Ok(out.into_inner().into())
} else {
Err(errs.into())
Err(Error::Multiple(errs.into()))
}
}
@@ -5,9 +5,9 @@ authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
[dependencies]
derive_more = "0.99.9"
futures = "0.3.5"
log = "0.4.8"
log = "0.4.11"
thiserror = "1.0.21"
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
@@ -17,7 +17,7 @@
//! The provisioner is responsible for assembling a relay chain block
//! from a set of available parachain candidates of its choice.
#![deny(missing_docs)]
#![deny(missing_docs, unused_crate_dependencies, unused_results)]
use futures::{
channel::{mpsc, oneshot},
@@ -39,6 +39,7 @@ use polkadot_primitives::v1::{
CandidateDescriptor, CandidateReceipt, CollatorId, Hash, Id as ParaId, PoV,
};
use std::{convert::TryFrom, pin::Pin, sync::Arc};
use thiserror::Error;
const TARGET: &'static str = "candidate_selection";
@@ -116,18 +117,18 @@ impl TryFrom<AllMessages> for FromJob {
}
}
#[derive(Debug, derive_more::From)]
#[derive(Debug, Error)]
enum Error {
#[from]
Sending(mpsc::SendError),
#[from]
Util(util::Error),
#[from]
OneshotRecv(oneshot::Canceled),
#[from]
ChainApi(ChainApiError),
#[from]
Runtime(RuntimeApiError),
#[error(transparent)]
Sending(#[from] mpsc::SendError),
#[error(transparent)]
Util(#[from] util::Error),
#[error(transparent)]
OneshotRecv(#[from] oneshot::Canceled),
#[error(transparent)]
ChainApi(#[from] ChainApiError),
#[error(transparent)]
Runtime(#[from] RuntimeApiError),
}
impl JobTrait for CandidateSelectionJob {
@@ -149,14 +150,13 @@ impl JobTrait for CandidateSelectionJob {
receiver: mpsc::Receiver<ToJob>,
sender: mpsc::Sender<FromJob>,
) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
async move {
Box::pin(async move {
let job = CandidateSelectionJob::new(metrics, sender, receiver);
// it isn't necessary to break run_loop into its own function,
// but it's convenient to separate the concerns in this way
job.run_loop().await
}
.boxed()
})
}
}
@@ -5,11 +5,9 @@ authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
[dependencies]
derive_more = "0.99.9"
futures = "0.3.5"
log = "0.4.8"
log = "0.4.11"
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-core = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "master" }
parity-scale-codec = { version = "1.3.0", default-features = false, features = ["bit-vec", "derive"] }
@@ -20,8 +20,11 @@
//! according to a validation function. This delegates validation to an underlying
//! pool of processes used for execution of the Wasm.
#![deny(unused_crate_dependencies, unused_results)]
#![warn(missing_docs)]
use polkadot_subsystem::{
Subsystem, SubsystemContext, SpawnedSubsystem, SubsystemResult,
Subsystem, SubsystemContext, SpawnedSubsystem, SubsystemResult, SubsystemError,
FromOverseer, OverseerSignal,
messages::{
AllMessages, CandidateValidationMessage, RuntimeApiMessage,
@@ -116,9 +119,13 @@ impl<S, C> Subsystem<C> for CandidateValidationSubsystem<S> where
S: SpawnNamed + Clone + 'static,
{
fn start(self, ctx: C) -> SpawnedSubsystem {
let future = run(ctx, self.spawn, self.metrics)
.map_err(|e| SubsystemError::with_origin("candidate-validation", e))
.map(|_| ())
.boxed();
SpawnedSubsystem {
name: "candidate-validation-subsystem",
future: run(ctx, self.spawn, self.metrics).map(|_| ()).boxed(),
future,
}
}
}
+13 -3
View File
@@ -27,9 +27,12 @@
//! * Last finalized block number
//! * Ancestors
#![deny(unused_crate_dependencies, unused_results)]
#![warn(missing_docs)]
use polkadot_subsystem::{
FromOverseer, OverseerSignal,
SpawnedSubsystem, Subsystem, SubsystemResult, SubsystemContext,
SpawnedSubsystem, Subsystem, SubsystemResult, SubsystemError, SubsystemContext,
messages::ChainApiMessage,
};
use polkadot_node_subsystem_util::{
@@ -61,8 +64,12 @@ impl<Client, Context> Subsystem<Context> for ChainApiSubsystem<Client> where
Context: SubsystemContext<Message = ChainApiMessage>
{
fn start(self, ctx: Context) -> SpawnedSubsystem {
let future = run(ctx, self)
.map_err(|e| SubsystemError::with_origin("chain-api", e))
.map(|_| ())
.boxed();
SpawnedSubsystem {
future: run(ctx, self).map(|_| ()).boxed(),
future,
name: "chain-api-subsystem",
}
}
@@ -112,7 +119,10 @@ where
let maybe_header = subsystem.client.header(BlockId::Hash(hash));
match maybe_header {
// propagate the error
Err(e) => Some(Err(e.to_string().into())),
Err(e) => {
let e = e.to_string().into();
Some(Err(e))
},
// fewer than `k` ancestors are available
Ok(None) => None,
Ok(Some(header)) => {
-4
View File
@@ -6,16 +6,13 @@ edition = "2018"
[dependencies]
futures = "0.3.4"
futures-timer = "3.0.1"
log = "0.4.8"
parity-scale-codec = "1.3.4"
polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-overseer = { path = "../../overseer" }
polkadot-primitives = { path = "../../../primitives" }
sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
@@ -23,5 +20,4 @@ sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" }
tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] }
wasm-timer = "0.2.4"
+23 -2
View File
@@ -1,3 +1,23 @@
// Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! The proposer proposes new blocks to include
#![deny(unused_crate_dependencies, unused_results)]
use futures::prelude::*;
use futures::select;
use polkadot_node_subsystem::{messages::{AllMessages, ProvisionerInherentData, ProvisionerMessage}, SubsystemError};
@@ -123,7 +143,7 @@ where
let (sender, receiver) = futures::channel::oneshot::channel();
overseer.wait_for_activation(parent_header_hash, sender).await?;
receiver.await.map_err(Error::ClosedChannelFromProvisioner)?;
receiver.await.map_err(Error::ClosedChannelFromProvisioner)??;
let (sender, receiver) = futures::channel::oneshot::channel();
// strictly speaking, we don't _have_ to .await this send_msg before opening the
@@ -206,7 +226,7 @@ where
// It would have been more ergonomic to use thiserror to derive the
// From implementations, Display, and std::error::Error, but unfortunately
// two of the wrapped errors (sp_inherents::Error, SubsystemError) also
// one of the wrapped errors (sp_inherents::Error) also
// don't impl std::error::Error, which breaks the thiserror derive.
#[derive(Debug)]
pub enum Error {
@@ -261,6 +281,7 @@ impl std::error::Error for Error {
Self::Consensus(err) => Some(err),
Self::Blockchain(err) => Some(err),
Self::ClosedChannelFromProvisioner(err) => Some(err),
Self::Subsystem(err) => Some(err),
_ => None
}
}
+2 -4
View File
@@ -6,16 +6,14 @@ edition = "2018"
[dependencies]
bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] }
derive_more = "0.99.9"
futures = "0.3.5"
log = "0.4.8"
log = "0.4.11"
thiserror = "1.0.21"
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
[dev-dependencies]
lazy_static = "1.4"
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+24 -395
View File
@@ -17,7 +17,7 @@
//! The provisioner is responsible for assembling a relay chain block
//! from a set of available parachain candidates of its choice.
#![deny(missing_docs)]
#![deny(missing_docs, unused_crate_dependencies, unused_results)]
use bitvec::vec::BitVec;
use futures::{
@@ -42,6 +42,7 @@ use polkadot_primitives::v1::{
SignedAvailabilityBitfield,
};
use std::{collections::HashMap, convert::TryFrom, pin::Pin};
use thiserror::Error;
struct ProvisioningJob {
relay_parent: Hash,
@@ -115,19 +116,25 @@ impl TryFrom<AllMessages> for FromJob {
}
}
#[derive(Debug, derive_more::From)]
#[derive(Debug, Error)]
enum Error {
#[from]
Sending(mpsc::SendError),
#[from]
Util(util::Error),
#[from]
OneshotRecv(oneshot::Canceled),
#[from]
ChainApi(ChainApiError),
#[from]
Runtime(RuntimeApiError),
OneshotSend,
#[error(transparent)]
Util(#[from] util::Error),
#[error(transparent)]
OneshotRecv(#[from] oneshot::Canceled),
#[error(transparent)]
ChainApi(#[from] ChainApiError),
#[error(transparent)]
Runtime(#[from] RuntimeApiError),
#[error("Failed to send message to ChainAPI")]
ChainApiMessageSend(#[source] mpsc::SendError),
#[error("Failed to send return message with Inherents")]
InherentDataReturnChannel,
}
impl JobTrait for ProvisioningJob {
@@ -230,7 +237,7 @@ impl ProvisioningJob {
let tail = bad_indices[bad_indices.len() - 1];
let retain = *idx != tail;
if *idx >= tail {
bad_indices.pop();
let _ = bad_indices.pop();
}
retain
})
@@ -299,7 +306,7 @@ async fn send_inherent_data(
return_sender
.send((bitfields, candidates))
.map_err(|_| Error::OneshotSend)?;
.map_err(|_data| Error::InherentDataReturnChannel)?;
Ok(())
}
@@ -423,7 +430,7 @@ async fn get_block_number_under_construction(
tx,
)))
.await
.map_err(|_| Error::OneshotSend)?;
.map_err(|e| Error::ChainApiMessageSend(e))?;
match rx.await? {
Ok(Some(n)) => Ok(n + 1),
Ok(None) => Ok(0),
@@ -504,382 +511,4 @@ impl metrics::Metrics for Metrics {
delegated_subsystem!(ProvisioningJob((), Metrics) <- ToJob as ProvisioningSubsystem);
#[cfg(test)]
mod tests {
use super::*;
use bitvec::bitvec;
use polkadot_primitives::v1::{OccupiedCore, ScheduledCore};
pub fn occupied_core(para_id: u32) -> CoreState {
CoreState::Occupied(OccupiedCore {
para_id: para_id.into(),
group_responsible: para_id.into(),
next_up_on_available: None,
occupied_since: 100_u32,
time_out_at: 200_u32,
next_up_on_time_out: None,
availability: default_bitvec(),
})
}
pub fn build_occupied_core<Builder>(para_id: u32, builder: Builder) -> CoreState
where
Builder: FnOnce(&mut OccupiedCore),
{
let mut core = match occupied_core(para_id) {
CoreState::Occupied(core) => core,
_ => unreachable!(),
};
builder(&mut core);
CoreState::Occupied(core)
}
pub fn default_bitvec() -> CoreAvailability {
bitvec![bitvec::order::Lsb0, u8; 0; 32]
}
pub fn scheduled_core(id: u32) -> ScheduledCore {
ScheduledCore {
para_id: id.into(),
..Default::default()
}
}
mod select_availability_bitfields {
use super::super::*;
use super::{default_bitvec, occupied_core};
use futures::executor::block_on;
use std::sync::Arc;
use polkadot_primitives::v1::{SigningContext, ValidatorIndex, ValidatorId};
use sp_application_crypto::AppKey;
use sp_keystore::{CryptoStore, SyncCryptoStorePtr};
use sc_keystore::LocalKeystore;
async fn signed_bitfield(
keystore: &SyncCryptoStorePtr,
field: CoreAvailability,
validator_idx: ValidatorIndex,
) -> SignedAvailabilityBitfield {
let public = CryptoStore::sr25519_generate_new(&**keystore, ValidatorId::ID, None)
.await
.expect("generated sr25519 key");
SignedAvailabilityBitfield::sign(
&keystore,
field.into(),
&<SigningContext<Hash>>::default(),
validator_idx,
&public.into(),
).await.expect("Should be signed")
}
#[test]
fn not_more_than_one_per_validator() {
// Configure filesystem-based keystore as generating keys without seed
// would trigger the key to be generated on the filesystem.
let keystore_path = tempfile::tempdir().expect("Creates keystore path");
let keystore : SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None)
.expect("Creates keystore"));
let bitvec = default_bitvec();
let cores = vec![occupied_core(0), occupied_core(1)];
// we pass in three bitfields with two validators
// this helps us check the postcondition that we get two bitfields back, for which the validators differ
let bitfields = vec![
block_on(signed_bitfield(&keystore, bitvec.clone(), 0)),
block_on(signed_bitfield(&keystore, bitvec.clone(), 1)),
block_on(signed_bitfield(&keystore, bitvec, 1)),
];
let mut selected_bitfields = select_availability_bitfields(&cores, &bitfields);
selected_bitfields.sort_by_key(|bitfield| bitfield.validator_index());
assert_eq!(selected_bitfields.len(), 2);
assert_eq!(selected_bitfields[0], bitfields[0]);
// we don't know which of the (otherwise equal) bitfields will be selected
assert!(selected_bitfields[1] == bitfields[1] || selected_bitfields[1] == bitfields[2]);
}
#[test]
fn each_corresponds_to_an_occupied_core() {
// Configure filesystem-based keystore as generating keys without seed
// would trigger the key to be generated on the filesystem.
let keystore_path = tempfile::tempdir().expect("Creates keystore path");
let keystore : SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None)
.expect("Creates keystore"));
let bitvec = default_bitvec();
let cores = vec![CoreState::Free, CoreState::Scheduled(Default::default())];
let bitfields = vec![
block_on(signed_bitfield(&keystore, bitvec.clone(), 0)),
block_on(signed_bitfield(&keystore, bitvec.clone(), 1)),
block_on(signed_bitfield(&keystore, bitvec, 1)),
];
let mut selected_bitfields = select_availability_bitfields(&cores, &bitfields);
selected_bitfields.sort_by_key(|bitfield| bitfield.validator_index());
// bitfields not corresponding to occupied cores are not selected
assert!(selected_bitfields.is_empty());
}
#[test]
fn more_set_bits_win_conflicts() {
// Configure filesystem-based keystore as generating keys without seed
// would trigger the key to be generated on the filesystem.
let keystore_path = tempfile::tempdir().expect("Creates keystore path");
let keystore : SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None)
.expect("Creates keystore"));
let bitvec_zero = default_bitvec();
let bitvec_one = {
let mut bitvec = bitvec_zero.clone();
bitvec.set(0, true);
bitvec
};
let cores = vec![occupied_core(0)];
let bitfields = vec![
block_on(signed_bitfield(&keystore, bitvec_zero, 0)),
block_on(signed_bitfield(&keystore, bitvec_one.clone(), 0)),
];
// this test is probabilistic: chances are excellent that it does what it claims to.
// it cannot fail unless things are broken.
// however, there is a (very small) chance that it passes when things are broken.
for _ in 0..64 {
let selected_bitfields = select_availability_bitfields(&cores, &bitfields);
assert_eq!(selected_bitfields.len(), 1);
assert_eq!(selected_bitfields[0].payload().0, bitvec_one);
}
}
}
mod select_candidates {
use futures_timer::Delay;
use super::super::*;
use super::{build_occupied_core, default_bitvec, occupied_core, scheduled_core};
use polkadot_node_subsystem::messages::RuntimeApiRequest::{
AvailabilityCores, PersistedValidationData as PersistedValidationDataReq,
};
use polkadot_primitives::v1::{
BlockNumber, CandidateDescriptor, CommittedCandidateReceipt, PersistedValidationData,
};
use FromJob::{ChainApi, Runtime};
const BLOCK_UNDER_PRODUCTION: BlockNumber = 128;
fn test_harness<OverseerFactory, Overseer, TestFactory, Test>(
overseer_factory: OverseerFactory,
test_factory: TestFactory,
) where
OverseerFactory: FnOnce(mpsc::Receiver<FromJob>) -> Overseer,
Overseer: Future<Output = ()>,
TestFactory: FnOnce(mpsc::Sender<FromJob>) -> Test,
Test: Future<Output = ()>,
{
let (tx, rx) = mpsc::channel(64);
let overseer = overseer_factory(rx);
let test = test_factory(tx);
futures::pin_mut!(overseer, test);
futures::executor::block_on(future::select(overseer, test));
}
// For test purposes, we always return this set of availability cores:
//
// [
// 0: Free,
// 1: Scheduled(default),
// 2: Occupied(no next_up set),
// 3: Occupied(next_up_on_available set but not available),
// 4: Occupied(next_up_on_available set and available),
// 5: Occupied(next_up_on_time_out set but not timeout),
// 6: Occupied(next_up_on_time_out set and timeout but available),
// 7: Occupied(next_up_on_time_out set and timeout and not available),
// 8: Occupied(both next_up set, available),
// 9: Occupied(both next_up set, not available, no timeout),
// 10: Occupied(both next_up set, not available, timeout),
// 11: Occupied(next_up_on_available and available, but different successor para_id)
// ]
fn mock_availability_cores() -> Vec<CoreState> {
use std::ops::Not;
use CoreState::{Free, Scheduled};
vec![
// 0: Free,
Free,
// 1: Scheduled(default),
Scheduled(scheduled_core(1)),
// 2: Occupied(no next_up set),
occupied_core(2),
// 3: Occupied(next_up_on_available set but not available),
build_occupied_core(3, |core| {
core.next_up_on_available = Some(scheduled_core(3));
}),
// 4: Occupied(next_up_on_available set and available),
build_occupied_core(4, |core| {
core.next_up_on_available = Some(scheduled_core(4));
core.availability = core.availability.clone().not();
}),
// 5: Occupied(next_up_on_time_out set but not timeout),
build_occupied_core(5, |core| {
core.next_up_on_time_out = Some(scheduled_core(5));
}),
// 6: Occupied(next_up_on_time_out set and timeout but available),
build_occupied_core(6, |core| {
core.next_up_on_time_out = Some(scheduled_core(6));
core.time_out_at = BLOCK_UNDER_PRODUCTION;
core.availability = core.availability.clone().not();
}),
// 7: Occupied(next_up_on_time_out set and timeout and not available),
build_occupied_core(7, |core| {
core.next_up_on_time_out = Some(scheduled_core(7));
core.time_out_at = BLOCK_UNDER_PRODUCTION;
}),
// 8: Occupied(both next_up set, available),
build_occupied_core(8, |core| {
core.next_up_on_available = Some(scheduled_core(8));
core.next_up_on_time_out = Some(scheduled_core(8));
core.availability = core.availability.clone().not();
}),
// 9: Occupied(both next_up set, not available, no timeout),
build_occupied_core(9, |core| {
core.next_up_on_available = Some(scheduled_core(9));
core.next_up_on_time_out = Some(scheduled_core(9));
}),
// 10: Occupied(both next_up set, not available, timeout),
build_occupied_core(10, |core| {
core.next_up_on_available = Some(scheduled_core(10));
core.next_up_on_time_out = Some(scheduled_core(10));
core.time_out_at = BLOCK_UNDER_PRODUCTION;
}),
// 11: Occupied(next_up_on_available and available, but different successor para_id)
build_occupied_core(11, |core| {
core.next_up_on_available = Some(scheduled_core(12));
core.availability = core.availability.clone().not();
}),
]
}
async fn mock_overseer(mut receiver: mpsc::Receiver<FromJob>) {
use ChainApiMessage::BlockNumber;
use RuntimeApiMessage::Request;
while let Some(from_job) = receiver.next().await {
match from_job {
ChainApi(BlockNumber(_relay_parent, tx)) => {
tx.send(Ok(Some(BLOCK_UNDER_PRODUCTION - 1))).unwrap()
}
Runtime(Request(
_parent_hash,
PersistedValidationDataReq(_para_id, _assumption, tx),
)) => tx.send(Ok(Some(Default::default()))).unwrap(),
Runtime(Request(_parent_hash, AvailabilityCores(tx))) => {
tx.send(Ok(mock_availability_cores())).unwrap()
}
// non-exhaustive matches are fine for testing
_ => unimplemented!(),
}
}
}
#[test]
fn handles_overseer_failure() {
let overseer = |rx: mpsc::Receiver<FromJob>| async move {
// drop the receiver so it closes and the sender can't send, then just sleep long enough that
// this is almost certainly not the first of the two futures to complete
std::mem::drop(rx);
Delay::new(std::time::Duration::from_secs(1)).await;
};
let test = |mut tx: mpsc::Sender<FromJob>| async move {
// wait so that the overseer can drop the rx before we attempt to send
Delay::new(std::time::Duration::from_millis(50)).await;
let result = select_candidates(&[], &[], &[], Default::default(), &mut tx).await;
println!("{:?}", result);
assert!(std::matches!(result, Err(Error::OneshotSend)));
};
test_harness(overseer, test);
}
#[test]
fn can_succeed() {
test_harness(mock_overseer, |mut tx: mpsc::Sender<FromJob>| async move {
let result = select_candidates(&[], &[], &[], Default::default(), &mut tx).await;
println!("{:?}", result);
assert!(result.is_ok());
})
}
// this tests that only the appropriate candidates get selected.
// To accomplish this, we supply a candidate list containing one candidate per possible core;
// the candidate selection algorithm must filter them to the appropriate set
#[test]
fn selects_correct_candidates() {
let mock_cores = mock_availability_cores();
let empty_hash = PersistedValidationData::<BlockNumber>::default().hash();
let candidate_template = BackedCandidate {
candidate: CommittedCandidateReceipt {
descriptor: CandidateDescriptor {
persisted_validation_data_hash: empty_hash,
..Default::default()
},
..Default::default()
},
validity_votes: Vec::new(),
validator_indices: default_bitvec(),
};
let candidates: Vec<_> = std::iter::repeat(candidate_template)
.take(mock_cores.len())
.enumerate()
.map(|(idx, mut candidate)| {
candidate.candidate.descriptor.para_id = idx.into();
candidate
})
.cycle()
.take(mock_cores.len() * 3)
.enumerate()
.map(|(idx, mut candidate)| {
if idx < mock_cores.len() {
// first go-around: use candidates which should work
candidate
} else if idx < mock_cores.len() * 2 {
// for the second repetition of the candidates, give them the wrong hash
candidate.candidate.descriptor.persisted_validation_data_hash
= Default::default();
candidate
} else {
// third go-around: right hash, wrong para_id
candidate.candidate.descriptor.para_id = idx.into();
candidate
}
})
.collect();
// why those particular indices? see the comments on mock_availability_cores()
let expected_candidates: Vec<_> = [1, 4, 7, 8, 10]
.iter()
.map(|&idx| candidates[idx].clone())
.collect();
test_harness(mock_overseer, |mut tx: mpsc::Sender<FromJob>| async move {
let result =
select_candidates(&mock_cores, &[], &candidates, Default::default(), &mut tx)
.await;
if result.is_err() {
println!("{:?}", result);
}
assert_eq!(result.unwrap(), expected_candidates);
})
}
}
}
mod tests;
+377
View File
@@ -0,0 +1,377 @@
use super::*;
use bitvec::bitvec;
use polkadot_primitives::v1::{OccupiedCore, ScheduledCore};
/// Test fixture: a `CoreState::Occupied` core whose `para_id` and
/// responsible group are both derived from `para_id`, occupied from
/// block 100 until 200, with an all-zero availability bitfield and no
/// successor scheduled on either availability or timeout.
pub fn occupied_core(para_id: u32) -> CoreState {
	let core = OccupiedCore {
		occupied_since: 100_u32,
		time_out_at: 200_u32,
		para_id: para_id.into(),
		group_responsible: para_id.into(),
		availability: default_bitvec(),
		next_up_on_available: None,
		next_up_on_time_out: None,
	};
	CoreState::Occupied(core)
}
/// Test fixture: like [`occupied_core`], but allows `builder` to mutate
/// the `OccupiedCore` before it is wrapped back into `CoreState::Occupied`.
pub fn build_occupied_core<Builder>(para_id: u32, builder: Builder) -> CoreState
where
	Builder: FnOnce(&mut OccupiedCore),
{
	// `occupied_core` only ever produces the `Occupied` variant, so the
	// fallback arm can never be reached.
	if let CoreState::Occupied(mut core) = occupied_core(para_id) {
		builder(&mut core);
		CoreState::Occupied(core)
	} else {
		unreachable!()
	}
}
/// Test fixture: an all-zero, 32-bit-wide availability bitfield
/// (`Lsb0` bit order over `u8` storage) — i.e. "nothing available yet".
pub fn default_bitvec() -> CoreAvailability {
	bitvec![bitvec::order::Lsb0, u8; 0; 32]
}
pub fn scheduled_core(id: u32) -> ScheduledCore {
ScheduledCore {
para_id: id.into(),
..Default::default()
}
}
// Unit tests for `select_availability_bitfields`: at most one bitfield is kept
// per validator, only bitfields matching occupied cores are kept, and when one
// validator signs conflicting bitfields the one with more bits set wins.
mod select_availability_bitfields {
	use super::super::*;
	use super::{default_bitvec, occupied_core};
	use futures::executor::block_on;
	use std::sync::Arc;
	use polkadot_primitives::v1::{SigningContext, ValidatorIndex, ValidatorId};
	use sp_application_crypto::AppKey;
	use sp_keystore::{CryptoStore, SyncCryptoStorePtr};
	use sc_keystore::LocalKeystore;

	// Generate a fresh sr25519 validator key in `keystore` and use it to sign
	// `field` as validator `validator_idx` under a default signing context.
	async fn signed_bitfield(
		keystore: &SyncCryptoStorePtr,
		field: CoreAvailability,
		validator_idx: ValidatorIndex,
	) -> SignedAvailabilityBitfield {
		let public = CryptoStore::sr25519_generate_new(&**keystore, ValidatorId::ID, None)
			.await
			.expect("generated sr25519 key");
		SignedAvailabilityBitfield::sign(
			&keystore,
			field.into(),
			&<SigningContext<Hash>>::default(),
			validator_idx,
			&public.into(),
		).await.expect("Should be signed")
	}

	#[test]
	fn not_more_than_one_per_validator() {
		// Configure filesystem-based keystore as generating keys without seed
		// would trigger the key to be generated on the filesystem.
		let keystore_path = tempfile::tempdir().expect("Creates keystore path");
		let keystore : SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None)
			.expect("Creates keystore"));
		let bitvec = default_bitvec();
		let cores = vec![occupied_core(0), occupied_core(1)];
		// we pass in three bitfields with two validators
		// this helps us check the postcondition that we get two bitfields back, for which the validators differ
		let bitfields = vec![
			block_on(signed_bitfield(&keystore, bitvec.clone(), 0)),
			block_on(signed_bitfield(&keystore, bitvec.clone(), 1)),
			block_on(signed_bitfield(&keystore, bitvec, 1)),
		];
		let mut selected_bitfields = select_availability_bitfields(&cores, &bitfields);
		// sort by validator index so the assertions below are deterministic
		selected_bitfields.sort_by_key(|bitfield| bitfield.validator_index());
		assert_eq!(selected_bitfields.len(), 2);
		assert_eq!(selected_bitfields[0], bitfields[0]);
		// we don't know which of the (otherwise equal) bitfields will be selected
		assert!(selected_bitfields[1] == bitfields[1] || selected_bitfields[1] == bitfields[2]);
	}

	#[test]
	fn each_corresponds_to_an_occupied_core() {
		// Configure filesystem-based keystore as generating keys without seed
		// would trigger the key to be generated on the filesystem.
		let keystore_path = tempfile::tempdir().expect("Creates keystore path");
		let keystore : SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None)
			.expect("Creates keystore"));
		let bitvec = default_bitvec();
		// neither Free nor Scheduled cores are Occupied, so nothing should match
		let cores = vec![CoreState::Free, CoreState::Scheduled(Default::default())];
		let bitfields = vec![
			block_on(signed_bitfield(&keystore, bitvec.clone(), 0)),
			block_on(signed_bitfield(&keystore, bitvec.clone(), 1)),
			block_on(signed_bitfield(&keystore, bitvec, 1)),
		];
		let mut selected_bitfields = select_availability_bitfields(&cores, &bitfields);
		selected_bitfields.sort_by_key(|bitfield| bitfield.validator_index());
		// bitfields not corresponding to occupied cores are not selected
		assert!(selected_bitfields.is_empty());
	}

	#[test]
	fn more_set_bits_win_conflicts() {
		// Configure filesystem-based keystore as generating keys without seed
		// would trigger the key to be generated on the filesystem.
		let keystore_path = tempfile::tempdir().expect("Creates keystore path");
		let keystore : SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None)
			.expect("Creates keystore"));
		let bitvec_zero = default_bitvec();
		// same bitfield but with one bit set: should beat the all-zero one
		let bitvec_one = {
			let mut bitvec = bitvec_zero.clone();
			bitvec.set(0, true);
			bitvec
		};
		let cores = vec![occupied_core(0)];
		// both bitfields are signed by validator 0, so they conflict
		let bitfields = vec![
			block_on(signed_bitfield(&keystore, bitvec_zero, 0)),
			block_on(signed_bitfield(&keystore, bitvec_one.clone(), 0)),
		];
		// this test is probabilistic: chances are excellent that it does what it claims to.
		// it cannot fail unless things are broken.
		// however, there is a (very small) chance that it passes when things are broken.
		for _ in 0..64 {
			let selected_bitfields = select_availability_bitfields(&cores, &bitfields);
			assert_eq!(selected_bitfields.len(), 1);
			assert_eq!(selected_bitfields[0].payload().0, bitvec_one);
		}
	}
}
mod select_candidates {
use futures_timer::Delay;
use super::super::*;
use super::{build_occupied_core, default_bitvec, occupied_core, scheduled_core};
use polkadot_node_subsystem::messages::RuntimeApiRequest::{
AvailabilityCores, PersistedValidationData as PersistedValidationDataReq,
};
use polkadot_primitives::v1::{
BlockNumber, CandidateDescriptor, CommittedCandidateReceipt, PersistedValidationData,
};
use FromJob::{ChainApi, Runtime};
// The block number the mock overseer pretends we are building on top of
// (it answers `ChainApi::BlockNumber` queries with this minus one).
const BLOCK_UNDER_PRODUCTION: BlockNumber = 128;
// Runs a mock-overseer future and a test future concurrently over a shared
// channel, blocking until the first of the two completes (the loser is
// dropped by `future::select`).
fn test_harness<OverseerFactory, Overseer, TestFactory, Test>(
overseer_factory: OverseerFactory,
test_factory: TestFactory,
) where
OverseerFactory: FnOnce(mpsc::Receiver<FromJob>) -> Overseer,
Overseer: Future<Output = ()>,
TestFactory: FnOnce(mpsc::Sender<FromJob>) -> Test,
Test: Future<Output = ()>,
{
// Bounded channel; 64 slots is ample for these tests' message volume.
let (tx, rx) = mpsc::channel(64);
let overseer = overseer_factory(rx);
let test = test_factory(tx);
futures::pin_mut!(overseer, test);
// Result of the race is deliberately ignored: assertions inside the
// futures are what decide pass/fail.
let _ = futures::executor::block_on(future::select(overseer, test));
}
// For test purposes, we always return this set of availability cores:
//
// [
//   0: Free,
//   1: Scheduled(default),
//   2: Occupied(no next_up set),
//   3: Occupied(next_up_on_available set but not available),
//   4: Occupied(next_up_on_available set and available),
//   5: Occupied(next_up_on_time_out set but not timeout),
//   6: Occupied(next_up_on_time_out set and timeout but available),
//   7: Occupied(next_up_on_time_out set and timeout and not available),
//   8: Occupied(both next_up set, available),
//   9: Occupied(both next_up set, not available, no timeout),
//   10: Occupied(both next_up set, not available, timeout),
//   11: Occupied(next_up_on_available and available, but different successor para_id)
// ]
fn mock_availability_cores() -> Vec<CoreState> {
use std::ops::Not;
use CoreState::{Free, Scheduled};
vec![
// 0: Free,
Free,
// 1: Scheduled(default),
Scheduled(scheduled_core(1)),
// 2: Occupied(no next_up set),
occupied_core(2),
// 3: Occupied(next_up_on_available set but not available),
build_occupied_core(3, |core| {
core.next_up_on_available = Some(scheduled_core(3));
}),
// 4: Occupied(next_up_on_available set and available),
build_occupied_core(4, |core| {
core.next_up_on_available = Some(scheduled_core(4));
// flipping the default availability bitfield marks the core available
core.availability = core.availability.clone().not();
}),
// 5: Occupied(next_up_on_time_out set but not timeout),
build_occupied_core(5, |core| {
core.next_up_on_time_out = Some(scheduled_core(5));
}),
// 6: Occupied(next_up_on_time_out set and timeout but available),
build_occupied_core(6, |core| {
core.next_up_on_time_out = Some(scheduled_core(6));
core.time_out_at = BLOCK_UNDER_PRODUCTION;
core.availability = core.availability.clone().not();
}),
// 7: Occupied(next_up_on_time_out set and timeout and not available),
build_occupied_core(7, |core| {
core.next_up_on_time_out = Some(scheduled_core(7));
core.time_out_at = BLOCK_UNDER_PRODUCTION;
}),
// 8: Occupied(both next_up set, available),
build_occupied_core(8, |core| {
core.next_up_on_available = Some(scheduled_core(8));
core.next_up_on_time_out = Some(scheduled_core(8));
core.availability = core.availability.clone().not();
}),
// 9: Occupied(both next_up set, not available, no timeout),
build_occupied_core(9, |core| {
core.next_up_on_available = Some(scheduled_core(9));
core.next_up_on_time_out = Some(scheduled_core(9));
}),
// 10: Occupied(both next_up set, not available, timeout),
build_occupied_core(10, |core| {
core.next_up_on_available = Some(scheduled_core(10));
core.next_up_on_time_out = Some(scheduled_core(10));
core.time_out_at = BLOCK_UNDER_PRODUCTION;
}),
// 11: Occupied(next_up_on_available and available, but different successor para_id)
build_occupied_core(11, |core| {
core.next_up_on_available = Some(scheduled_core(12));
core.availability = core.availability.clone().not();
}),
]
}
// Minimal overseer stand-in: answers block-number queries with
// `BLOCK_UNDER_PRODUCTION - 1`, persisted-validation-data requests with a
// default value, and availability-core requests with the mock set above.
async fn mock_overseer(mut receiver: mpsc::Receiver<FromJob>) {
use ChainApiMessage::BlockNumber;
use RuntimeApiMessage::Request;
while let Some(from_job) = receiver.next().await {
match from_job {
ChainApi(BlockNumber(_relay_parent, tx)) => {
tx.send(Ok(Some(BLOCK_UNDER_PRODUCTION - 1))).unwrap()
}
Runtime(Request(
_parent_hash,
PersistedValidationDataReq(_para_id, _assumption, tx),
)) => tx.send(Ok(Some(Default::default()))).unwrap(),
Runtime(Request(_parent_hash, AvailabilityCores(tx))) => {
tx.send(Ok(mock_availability_cores())).unwrap()
}
// non-exhaustive matches are fine for testing
_ => unimplemented!(),
}
}
}
// A closed overseer channel must surface as `Error::ChainApiMessageSend`,
// not as a hang or panic.
#[test]
fn handles_overseer_failure() {
let overseer = |rx: mpsc::Receiver<FromJob>| async move {
// drop the receiver so it closes and the sender can't send, then just sleep long enough that
// this is almost certainly not the first of the two futures to complete
std::mem::drop(rx);
Delay::new(std::time::Duration::from_secs(1)).await;
};
let test = |mut tx: mpsc::Sender<FromJob>| async move {
// wait so that the overseer can drop the rx before we attempt to send
Delay::new(std::time::Duration::from_millis(50)).await;
let result = select_candidates(&[], &[], &[], Default::default(), &mut tx).await;
println!("{:?}", result);
assert!(std::matches!(result, Err(Error::ChainApiMessageSend(_))));
};
test_harness(overseer, test);
}
// Smoke test: with empty inputs and a responsive overseer, selection succeeds.
#[test]
fn can_succeed() {
test_harness(mock_overseer, |mut tx: mpsc::Sender<FromJob>| async move {
let result = select_candidates(&[], &[], &[], Default::default(), &mut tx).await;
println!("{:?}", result);
assert!(result.is_ok());
})
}
// this tests that only the appropriate candidates get selected.
// To accomplish this, we supply a candidate list containing one candidate per possible core;
// the candidate selection algorithm must filter them to the appropriate set
#[test]
fn selects_correct_candidates() {
let mock_cores = mock_availability_cores();
// hash of the default PersistedValidationData — the "correct" hash the
// mock overseer's responses will validate against
let empty_hash = PersistedValidationData::<BlockNumber>::default().hash();
let candidate_template = BackedCandidate {
candidate: CommittedCandidateReceipt {
descriptor: CandidateDescriptor {
persisted_validation_data_hash: empty_hash,
..Default::default()
},
..Default::default()
},
validity_votes: Vec::new(),
validator_indices: default_bitvec(),
};
// Build three repetitions of one-candidate-per-core:
// the first valid, the second with a wrong validation-data hash,
// the third with a wrong para_id.
let candidates: Vec<_> = std::iter::repeat(candidate_template)
.take(mock_cores.len())
.enumerate()
.map(|(idx, mut candidate)| {
candidate.candidate.descriptor.para_id = idx.into();
candidate
})
.cycle()
.take(mock_cores.len() * 3)
.enumerate()
.map(|(idx, mut candidate)| {
if idx < mock_cores.len() {
// first go-around: use candidates which should work
candidate
} else if idx < mock_cores.len() * 2 {
// for the second repetition of the candidates, give them the wrong hash
candidate.candidate.descriptor.persisted_validation_data_hash
= Default::default();
candidate
} else {
// third go-around: right hash, wrong para_id
candidate.candidate.descriptor.para_id = idx.into();
candidate
}
})
.collect();
// why those particular indices? see the comments on mock_availability_cores()
let expected_candidates: Vec<_> = [1, 4, 7, 8, 10]
.iter()
.map(|&idx| candidates[idx].clone())
.collect();
test_harness(mock_overseer, |mut tx: mpsc::Sender<FromJob>| async move {
let result =
select_candidates(&mock_cores, &[], &candidates, Default::default(), &mut tx)
.await;
if result.is_err() {
println!("{:?}", result);
}
assert_eq!(result.unwrap(), expected_candidates);
})
}
}
@@ -7,10 +7,8 @@ edition = "2018"
[dependencies]
futures = "0.3.5"
sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
@@ -19,6 +19,9 @@
//! This provides a clean, ownerless wrapper around the parachain-related runtime APIs. This crate
//! can also be used to cache responses from heavy runtime APIs.
#![deny(unused_crate_dependencies)]
#![warn(missing_docs)]
use polkadot_subsystem::{
Subsystem, SpawnedSubsystem, SubsystemResult, SubsystemContext,
FromOverseer, OverseerSignal,
@@ -7,26 +7,22 @@ edition = "2018"
[dependencies]
futures = "0.3.5"
log = "0.4.11"
streamunordered = "0.5.1"
codec = { package="parity-scale-codec", version = "1.3.4", features = ["std"] }
derive_more = "0.99.9"
polkadot-primitives = { path = "../../../primitives" }
polkadot-erasure-coding = { path = "../../../erasure-coding" }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
polkadot-network-bridge = { path = "../../network/bridge" }
polkadot-node-network-protocol = { path = "../../network/protocol" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
thiserror = "1.0.21"
[dev-dependencies]
polkadot-subsystem-testhelpers = { package = "polkadot-node-subsystem-test-helpers", path = "../../subsystem-test-helpers" }
bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
parking_lot = "0.11.0"
futures-timer = "3.0.2"
env_logger = "0.7.1"
assert_matches = "1.3.0"
@@ -22,56 +22,99 @@
//! peers. Verified in this context means, the erasure chunks contained merkle proof
//! is checked.
#![deny(unused_crate_dependencies, unused_qualifications)]
use codec::{Decode, Encode};
use futures::{channel::oneshot, FutureExt};
use futures::{channel::oneshot, FutureExt, TryFutureExt};
use sp_core::crypto::Public;
use sp_keystore::{CryptoStore, SyncCryptoStorePtr};
use log::{trace, warn};
use polkadot_erasure_coding::branch_hash;
use polkadot_node_network_protocol::{
v1 as protocol_v1, NetworkBridgeEvent, PeerId, ReputationChange as Rep, View,
};
use polkadot_node_subsystem_util::metrics::{self, prometheus};
use polkadot_primitives::v1::{
PARACHAIN_KEY_TYPE_ID,
BlakeTwo256, CommittedCandidateReceipt, CoreState, ErasureChunk,
Hash as Hash, HashT, Id as ParaId,
ValidatorId, ValidatorIndex, SessionIndex,
BlakeTwo256, CommittedCandidateReceipt, CoreState, ErasureChunk, Hash, HashT, Id as ParaId,
SessionIndex, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID,
};
use polkadot_subsystem::messages::{
AllMessages, AvailabilityDistributionMessage, NetworkBridgeMessage, RuntimeApiMessage,
RuntimeApiRequest, AvailabilityStoreMessage, ChainApiMessage,
AllMessages, AvailabilityDistributionMessage, AvailabilityStoreMessage, ChainApiMessage,
NetworkBridgeMessage, RuntimeApiMessage, RuntimeApiRequest,
};
use polkadot_subsystem::{
errors::{ChainApiError, RuntimeApiError},
ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem,
SubsystemContext, SubsystemError,
};
use polkadot_node_subsystem_util::{
metrics::{self, prometheus},
};
use polkadot_node_network_protocol::{
v1 as protocol_v1, View, ReputationChange as Rep, PeerId,
NetworkBridgeEvent,
};
use std::collections::{HashMap, HashSet};
use std::io;
use std::iter;
use thiserror::Error;
const TARGET: &'static str = "avad";
#[derive(Debug, derive_more::From)]
#[derive(Debug, Error)]
enum Error {
#[from]
Erasure(polkadot_erasure_coding::Error),
#[from]
Io(io::Error),
#[from]
Oneshot(oneshot::Canceled),
#[from]
Subsystem(SubsystemError),
#[from]
RuntimeApi(RuntimeApiError),
#[from]
ChainApi(ChainApiError),
#[error("Sending PendingAvailability query failed")]
QueryPendingAvailabilitySendQuery(#[source] SubsystemError),
#[error("Response channel to obtain PendingAvailability failed")]
QueryPendingAvailabilityResponseChannel(#[source] oneshot::Canceled),
#[error("RuntimeAPI to obtain PendingAvailability failed")]
QueryPendingAvailability(#[source] RuntimeApiError),
#[error("Sending StoreChunk query failed")]
StoreChunkSendQuery(#[source] SubsystemError),
#[error("Response channel to obtain StoreChunk failed")]
StoreChunkResponseChannel(#[source] oneshot::Canceled),
#[error("Sending QueryChunk query failed")]
QueryChunkSendQuery(#[source] SubsystemError),
#[error("Response channel to obtain QueryChunk failed")]
QueryChunkResponseChannel(#[source] oneshot::Canceled),
#[error("Sending QueryAncestors query failed")]
QueryAncestorsSendQuery(#[source] SubsystemError),
#[error("Response channel to obtain QueryAncestors failed")]
QueryAncestorsResponseChannel(#[source] oneshot::Canceled),
#[error("RuntimeAPI to obtain QueryAncestors failed")]
QueryAncestors(#[source] ChainApiError),
#[error("Sending QuerySession query failed")]
QuerySessionSendQuery(#[source] SubsystemError),
#[error("Response channel to obtain QuerySession failed")]
QuerySessionResponseChannel(#[source] oneshot::Canceled),
#[error("RuntimeAPI to obtain QuerySession failed")]
QuerySession(#[source] RuntimeApiError),
#[error("Sending QueryValidators query failed")]
QueryValidatorsSendQuery(#[source] SubsystemError),
#[error("Response channel to obtain QueryValidators failed")]
QueryValidatorsResponseChannel(#[source] oneshot::Canceled),
#[error("RuntimeAPI to obtain QueryValidators failed")]
QueryValidators(#[source] RuntimeApiError),
#[error("Sending AvailabilityCores query failed")]
AvailabilityCoresSendQuery(#[source] SubsystemError),
#[error("Response channel to obtain AvailabilityCores failed")]
AvailabilityCoresResponseChannel(#[source] oneshot::Canceled),
#[error("RuntimeAPI to obtain AvailabilityCores failed")]
AvailabilityCores(#[source] RuntimeApiError),
#[error("Sending AvailabilityCores query failed")]
QueryAvailabilitySendQuery(#[source] SubsystemError),
#[error("Response channel to obtain AvailabilityCores failed")]
QueryAvailabilityResponseChannel(#[source] oneshot::Canceled),
#[error("Sending out a peer report message")]
ReportPeerMessageSend(#[source] SubsystemError),
#[error("Sending a gossip message")]
TrackedGossipMessage(#[source] SubsystemError),
#[error("Receive channel closed")]
IncomingMessageChannel(#[source] SubsystemError),
}
type Result<T> = std::result::Result<T, Error>;
@@ -199,22 +242,18 @@ impl ProtocolState {
where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
let candidates =
query_live_candidates(ctx, self, std::iter::once(relay_parent)).await?;
let candidates = query_live_candidates(ctx, self, std::iter::once(relay_parent)).await?;
// register the relation of relay_parent to candidate..
// ..and the reverse association.
for (relay_parent_or_ancestor, (receipt_hash, receipt)) in candidates.clone() {
self
.reverse
self.reverse
.insert(receipt_hash.clone(), relay_parent_or_ancestor.clone());
let per_candidate = self.per_candidate.entry(receipt_hash.clone())
.or_default();
let per_candidate = self.per_candidate.entry(receipt_hash.clone()).or_default();
per_candidate.validator_index = validator_index.clone();
per_candidate.validators = validators.clone();
self
.receipts
self.receipts
.entry(relay_parent_or_ancestor)
.or_default()
.insert((receipt_hash, receipt));
@@ -240,8 +279,7 @@ impl ProtocolState {
.insert(relay_parent);
}
self
.per_relay_parent
self.per_relay_parent
.entry(relay_parent)
.or_default()
.ancestors = ancestors;
@@ -314,17 +352,21 @@ where
}
NetworkBridgeEvent::PeerMessage(remote, msg) => {
let gossiped_availability = match msg {
protocol_v1::AvailabilityDistributionMessage::Chunk(candidate_hash, chunk) =>
AvailabilityGossipMessage { candidate_hash, erasure_chunk: chunk }
protocol_v1::AvailabilityDistributionMessage::Chunk(candidate_hash, chunk) => {
AvailabilityGossipMessage {
candidate_hash,
erasure_chunk: chunk,
}
}
};
process_incoming_peer_message(ctx, state, remote, gossiped_availability, metrics).await?;
process_incoming_peer_message(ctx, state, remote, gossiped_availability, metrics)
.await?;
}
}
Ok(())
}
/// Handle the changes necessary when our view changes.
async fn handle_our_view_change<Context>(
ctx: &mut Context,
@@ -346,19 +388,15 @@ where
for added in added.iter() {
let added = **added;
let validators = query_validators(ctx, added).await?;
let validator_index = obtain_our_validator_index(
&validators,
keystore.clone(),
).await;
state.add_relay_parent(ctx, added, validators, validator_index).await?;
let validator_index = obtain_our_validator_index(&validators, keystore.clone()).await;
state
.add_relay_parent(ctx, added, validators, validator_index)
.await?;
}
// handle all candidates
for (candidate_hash, _receipt) in state.cached_live_candidates_unioned(added) {
let per_candidate = state
.per_candidate
.entry(candidate_hash)
.or_default();
let per_candidate = state.per_candidate.entry(candidate_hash).or_default();
// assure the node has the validator role
if per_candidate.validator_index.is_none() {
@@ -388,19 +426,18 @@ where
// distribute all erasure messages to interested peers
for chunk_index in 0u32..(validator_count as u32) {
// only the peers which did not receive this particular erasure chunk
let per_candidate = state
.per_candidate
.entry(candidate_hash)
.or_default();
let per_candidate = state.per_candidate.entry(candidate_hash).or_default();
// obtain the chunks from the cache, if not fallback
// and query the availability store
let message_id = (candidate_hash, chunk_index);
let erasure_chunk = if let Some(message) = per_candidate.message_vault.get(&chunk_index) {
let erasure_chunk = if let Some(message) = per_candidate.message_vault.get(&chunk_index)
{
message.erasure_chunk.clone()
} else if let Some(erasure_chunk) = query_chunk(ctx, candidate_hash, chunk_index as ValidatorIndex).await? {
} else if let Some(erasure_chunk) =
query_chunk(ctx, candidate_hash, chunk_index as ValidatorIndex).await?
{
erasure_chunk
} else {
continue;
@@ -415,9 +452,7 @@ where
!per_candidate
.sent_messages
.get(*peer)
.filter(|set| {
set.contains(&message_id)
})
.filter(|set| set.contains(&message_id))
.is_some()
})
.map(|peer| peer.clone())
@@ -427,7 +462,8 @@ where
erasure_chunk,
};
send_tracked_gossip_message_to_peers(ctx, per_candidate, metrics, peers, message).await?;
send_tracked_gossip_message_to_peers(ctx, per_candidate, metrics, peers, message)
.await?;
}
}
@@ -450,7 +486,8 @@ async fn send_tracked_gossip_message_to_peers<Context>(
where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
send_tracked_gossip_messages_to_peers(ctx, per_candidate, metrics, peers, iter::once(message)).await
send_tracked_gossip_messages_to_peers(ctx, per_candidate, metrics, peers, iter::once(message))
.await
}
#[inline(always)]
@@ -464,7 +501,8 @@ async fn send_tracked_gossip_messages_to_peer<Context>(
where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
send_tracked_gossip_messages_to_peers(ctx, per_candidate, metrics, vec![peer], message_iter).await
send_tracked_gossip_messages_to_peers(ctx, per_candidate, metrics, vec![peer], message_iter)
.await
}
async fn send_tracked_gossip_messages_to_peers<Context>(
@@ -478,7 +516,7 @@ where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
if peers.is_empty() {
return Ok(())
return Ok(());
}
for message in message_iter {
for peer in peers.iter() {
@@ -506,7 +544,7 @@ where
),
))
.await
.map_err::<Error, _>(Into::into)?;
.map_err(|e| Error::TrackedGossipMessage(e))?;
metrics.on_chunk_distributed();
}
@@ -543,8 +581,7 @@ where
let per_candidate = state.per_candidate.entry(candidate_hash).or_default();
// obtain the relevant chunk indices not sent yet
let messages = ((0 as ValidatorIndex)
..(per_candidate.validators.len() as ValidatorIndex))
let messages = ((0 as ValidatorIndex)..(per_candidate.validators.len() as ValidatorIndex))
.into_iter()
.filter_map(|erasure_chunk_index: ValidatorIndex| {
let message_id = (candidate_hash, erasure_chunk_index);
@@ -567,7 +604,8 @@ where
.cloned()
.collect::<HashSet<_>>();
send_tracked_gossip_messages_to_peer(ctx, per_candidate, metrics, origin.clone(), messages).await?;
send_tracked_gossip_messages_to_peer(ctx, per_candidate, metrics, origin.clone(), messages)
.await?;
}
Ok(())
}
@@ -580,8 +618,13 @@ async fn obtain_our_validator_index(
keystore: SyncCryptoStorePtr,
) -> Option<ValidatorIndex> {
for (idx, validator) in validators.iter().enumerate() {
if CryptoStore::has_keys(&*keystore, &[(validator.to_raw_vec(), PARACHAIN_KEY_TYPE_ID)]).await {
return Some(idx as ValidatorIndex)
if CryptoStore::has_keys(
&*keystore,
&[(validator.to_raw_vec(), PARACHAIN_KEY_TYPE_ID)],
)
.await
{
return Some(idx as ValidatorIndex);
}
}
None
@@ -664,8 +707,13 @@ where
live_candidate.descriptor.relay_parent.clone(),
message.erasure_chunk.index,
message.erasure_chunk.clone(),
).await? {
warn!(target: TARGET, "Failed to store erasure chunk to availability store");
)
.await?
{
warn!(
target: TARGET,
"Failed to store erasure chunk to availability store"
);
}
}
}
@@ -729,7 +777,10 @@ impl AvailabilityDistributionSubsystem {
// work: process incoming messages from the overseer.
let mut state = ProtocolState::default();
loop {
let message = ctx.recv().await.map_err::<Error, _>(Into::into)?;
let message = ctx
.recv()
.await
.map_err(|e| Error::IncomingMessageChannel(e))?;
match message {
FromOverseer::Communication {
msg: AvailabilityDistributionMessage::NetworkBridgeUpdateV1(event),
@@ -740,7 +791,9 @@ impl AvailabilityDistributionSubsystem {
&mut state,
&self.metrics,
event,
).await {
)
.await
{
warn!(
target: TARGET,
"Failed to handle incomming network messages: {:?}", e
@@ -767,9 +820,15 @@ where
Context: SubsystemContext<Message = AvailabilityDistributionMessage> + Sync + Send,
{
fn start(self, ctx: Context) -> SpawnedSubsystem {
let future = self
.run(ctx)
.map_err(|e| SubsystemError::with_origin("availability-distribution", e))
.map(|_| ())
.boxed();
SpawnedSubsystem {
name: "availability-distribution-subsystem",
future: Box::pin(async move { self.run(ctx) }.map(|_| ())),
future,
}
}
}
@@ -816,7 +875,6 @@ where
HashMap::<Hash, (Hash, CommittedCandidateReceipt)>::with_capacity(capacity);
for relay_parent in iter {
// register one of relay parents (not the ancestors)
let mut ancestors = query_up_to_k_ancestors_in_same_session(
ctx,
@@ -827,7 +885,6 @@ where
ancestors.push(relay_parent);
// ancestors might overlap, so check the cache too
let unknown = ancestors
.into_iter()
@@ -841,10 +898,7 @@ where
// directly extend the live_candidates with the cached value
live_candidates.extend(receipts.into_iter().map(
|(receipt_hash, receipt)| {
(
relay_parent,
(receipt_hash.clone(), receipt.clone()),
)
(relay_parent, (receipt_hash.clone(), receipt.clone()))
},
));
Some(())
@@ -877,10 +931,12 @@ where
RuntimeApiRequest::AvailabilityCores(tx),
)))
.await
.map_err::<Error, _>(Into::into)?;
.map_err(|e| Error::AvailabilityCoresSendQuery(e))?;
let all_para_ids: Vec<_> = rx
.await??;
.await
.map_err(|e| Error::AvailabilityCoresResponseChannel(e))?
.map_err(|e| Error::AvailabilityCores(e))?;
let occupied_para_ids = all_para_ids
.into_iter()
@@ -910,14 +966,11 @@ where
NetworkBridgeMessage::ReportPeer(peer, rep),
))
.await
.map_err::<Error, _>(Into::into)
.map_err(|e| Error::ReportPeerMessageSend(e))
}
/// Query the proof of validity for a particular candidate hash.
async fn query_data_availability<Context>(
ctx: &mut Context,
candidate_hash: Hash,
) -> Result<bool>
async fn query_data_availability<Context>(ctx: &mut Context, candidate_hash: Hash) -> Result<bool>
where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
@@ -925,11 +978,12 @@ where
ctx.send_message(AllMessages::AvailabilityStore(
AvailabilityStoreMessage::QueryDataAvailability(candidate_hash, tx),
))
.await?;
rx.await.map_err::<Error, _>(Into::into)
.await
.map_err(|e| Error::QueryAvailabilitySendQuery(e))?;
rx.await
.map_err(|e| Error::QueryAvailabilityResponseChannel(e))
}
async fn query_chunk<Context>(
ctx: &mut Context,
candidate_hash: Hash,
@@ -940,13 +994,13 @@ where
{
let (tx, rx) = oneshot::channel();
ctx.send_message(AllMessages::AvailabilityStore(
AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx),
))
.await?;
rx.await.map_err::<Error, _>(Into::into)
AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx),
))
.await
.map_err(|e| Error::QueryChunkSendQuery(e))?;
rx.await.map_err(|e| Error::QueryChunkResponseChannel(e))
}
async fn store_chunk<Context>(
ctx: &mut Context,
candidate_hash: Hash,
@@ -958,16 +1012,19 @@ where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
let (tx, rx) = oneshot::channel();
ctx.send_message(AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreChunk {
candidate_hash,
relay_parent,
validator_index,
chunk: erasure_chunk,
tx,
}
)).await?;
rx.await.map_err::<Error, _>(Into::into)
ctx.send_message(
AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreChunk {
candidate_hash,
relay_parent,
validator_index,
chunk: erasure_chunk,
tx,
}
)).await
.map_err(|e| Error::StoreChunkSendQuery(e))?;
rx.await.map_err(|e| Error::StoreChunkResponseChannel(e))
}
/// Request the head data for a particular para.
@@ -981,12 +1038,15 @@ where
{
let (tx, rx) = oneshot::channel();
ctx.send_message(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::CandidatePendingAvailability(para, tx),
)))
.await?;
rx.await?
.map_err::<Error, _>(Into::into)
relay_parent,
RuntimeApiRequest::CandidatePendingAvailability(para, tx),
)))
.await
.map_err(|e| Error::QueryPendingAvailabilitySendQuery(e))?;
rx.await
.map_err(|e| Error::QueryPendingAvailabilityResponseChannel(e))?
.map_err(|e| Error::QueryPendingAvailability(e))
}
/// Query the validator set.
@@ -1004,9 +1064,11 @@ where
));
ctx.send_message(query_validators)
.await?;
rx.await?
.map_err::<Error, _>(Into::into)
.await
.map_err(|e| Error::QueryValidatorsSendQuery(e))?;
rx.await
.map_err(|e| Error::QueryValidatorsResponseChannel(e))?
.map_err(|e| Error::QueryValidators(e))
}
/// Query the hash of the `K` ancestors
@@ -1026,9 +1088,11 @@ where
});
ctx.send_message(query_ancestors)
.await?;
rx.await?
.map_err::<Error, _>(Into::into)
.await
.map_err(|e| Error::QueryAncestorsSendQuery(e))?;
rx.await
.map_err(|e| Error::QueryAncestorsResponseChannel(e))?
.map_err(|e| Error::QueryAncestors(e))
}
/// Query the session index of a relay parent
@@ -1046,9 +1110,11 @@ where
));
ctx.send_message(query_session_idx_for_child)
.await?;
rx.await?
.map_err::<Error, _>(Into::into)
.await
.map_err(|e| Error::QuerySessionSendQuery(e))?;
rx.await
.map_err(|e| Error::QuerySessionResponseChannel(e))?
.map_err(|e| Error::QuerySession(e))
}
/// Queries up to k ancestors with the constraints of equiv session
@@ -1089,7 +1155,6 @@ where
Ok(acc)
}
#[derive(Clone)]
struct MetricsInner {
gossipped_availability_chunks: prometheus::Counter<prometheus::U64>,
@@ -1108,12 +1173,14 @@ impl Metrics {
}
impl metrics::Metrics for Metrics {
fn try_register(registry: &prometheus::Registry) -> std::result::Result<Self, prometheus::PrometheusError> {
fn try_register(
registry: &prometheus::Registry,
) -> std::result::Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
gossipped_availability_chunks: prometheus::register(
prometheus::Counter::new(
"parachain_gossipped_availability_chunks_total",
"Number of availability chunks gossipped to other peers."
"Number of availability chunks gossipped to other peers.",
)?,
registry,
)?,
@@ -17,22 +17,21 @@
use super::*;
use assert_matches::assert_matches;
use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks};
use polkadot_node_network_protocol::ObservedRole;
use polkadot_node_subsystem_util::TimeoutExt;
use polkadot_primitives::v1::{
AvailableData, BlockData, CandidateCommitments, CandidateDescriptor, GroupIndex,
GroupRotationInfo, HeadData, PersistedValidationData, OccupiedCore,
PoV, ScheduledCore,
GroupRotationInfo, HeadData, OccupiedCore, PersistedValidationData, PoV, ScheduledCore,
};
use polkadot_subsystem_testhelpers::{self as test_helpers};
use polkadot_node_subsystem_util::TimeoutExt;
use polkadot_node_network_protocol::ObservedRole;
use futures::{executor, future, Future};
use futures_timer::Delay;
use smallvec::smallvec;
use std::{sync::Arc, time::Duration};
use sc_keystore::LocalKeystore;
use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore};
use smallvec::smallvec;
use sp_application_crypto::AppKey;
use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
use std::{sync::Arc, time::Duration};
macro_rules! view {
( $( $hash:expr ),* $(,)? ) => [
@@ -46,9 +45,9 @@ macro_rules! delay {
};
}
fn chunk_protocol_message(message: AvailabilityGossipMessage)
-> protocol_v1::AvailabilityDistributionMessage
{
fn chunk_protocol_message(
message: AvailabilityGossipMessage,
) -> protocol_v1::AvailabilityDistributionMessage {
protocol_v1::AvailabilityDistributionMessage::Chunk(
message.candidate_hash,
message.erasure_chunk,
@@ -175,8 +174,12 @@ impl Default for TestState {
let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
SyncCryptoStore::sr25519_generate_new(&*keystore, ValidatorId::ID, Some(&validators[0].to_seed()))
.expect("Insert key into keystore");
SyncCryptoStore::sr25519_generate_new(
&*keystore,
ValidatorId::ID,
Some(&validators[0].to_seed()),
)
.expect("Insert key into keystore");
let validator_public = validator_pubkeys(&validators);
@@ -867,10 +870,7 @@ fn reputation_verification() {
overseer_send(
&mut virtual_overseer,
AvailabilityDistributionMessage::NetworkBridgeUpdateV1(
NetworkBridgeEvent::PeerMessage(
peer_a.clone(),
chunk_protocol_message(valid2),
),
NetworkBridgeEvent::PeerMessage(peer_a.clone(), chunk_protocol_message(valid2)),
),
)
.await;
@@ -6,17 +6,12 @@ edition = "2018"
[dependencies]
futures = "0.3.5"
futures-timer = "3.0.2"
log = "0.4.8"
streamunordered = "0.5.1"
log = "0.4.11"
codec = { package="parity-scale-codec", version = "1.3.4" }
node-primitives = { package = "polkadot-node-primitives", path = "../../primitives" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
polkadot-network-bridge = { path = "../../network/bridge" }
polkadot-node-network-protocol = { path = "../../network/protocol" }
sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
[dev-dependencies]
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
@@ -25,9 +20,7 @@ sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
parking_lot = "0.11.0"
maplit = "1.0.2"
smol = "0.3.3"
env_logger = "0.7.1"
assert_matches = "1.3.0"
tempfile = "3.1.0"
@@ -20,8 +20,10 @@
//! for a particular relay parent.
//! Independently of that, gossips on received messages from peers to other interested peers.
#![deny(unused_crate_dependencies)]
use codec::{Decode, Encode};
use futures::{channel::oneshot, FutureExt};
use futures::{channel::oneshot, FutureExt, TryFutureExt};
use log::{trace, warn};
use polkadot_subsystem::messages::*;
@@ -33,6 +35,7 @@ use polkadot_node_subsystem_util::{
};
use polkadot_primitives::v1::{Hash, SignedAvailabilityBitfield, SigningContext, ValidatorId};
use polkadot_node_network_protocol::{v1 as protocol_v1, PeerId, NetworkBridgeEvent, View, ReputationChange};
use polkadot_subsystem::SubsystemError;
use std::collections::{HashMap, HashSet};
const COST_SIGNATURE_INVALID: ReputationChange =
@@ -578,9 +581,15 @@ where
C: SubsystemContext<Message = BitfieldDistributionMessage> + Sync + Send,
{
fn start(self, ctx: C) -> SpawnedSubsystem {
let future = self.run(ctx)
.map_err(|e| {
SubsystemError::with_origin("bitfield-distribution", e)
})
.map(|_| ()).boxed();
SpawnedSubsystem {
name: "bitfield-distribution-subsystem",
future: Box::pin(async move { Self::run(self, ctx) }.map(|_| ())),
future,
}
}
}
+1 -3
View File
@@ -7,9 +7,7 @@ edition = "2018"
[dependencies]
async-trait = "0.1"
futures = "0.3.5"
log = "0.4.8"
futures-timer = "3.0.2"
streamunordered = "0.5.1"
log = "0.4.11"
polkadot-primitives = { path = "../../../primitives" }
parity-scale-codec = "1.3.4"
sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" }
+16 -6
View File
@@ -16,6 +16,10 @@
//! The Network Bridge Subsystem - protocol multiplexer for Polkadot.
#![deny(unused_crate_dependencies, unused_results)]
#![warn(missing_docs)]
use parity_scale_codec::{Encode, Decode};
use futures::prelude::*;
use futures::future::BoxFuture;
@@ -219,13 +223,19 @@ impl<Net, AD, Context> Subsystem<Context> for NetworkBridge<Net, AD>
// Swallow error because failure is fatal to the node and we log with more precision
// within `run_network`.
let Self { network_service, authority_discovery_service } = self;
SpawnedSubsystem {
name: "network-bridge-subsystem",
future: run_network(
let future = run_network(
network_service,
authority_discovery_service,
ctx,
).map(|_| ()).boxed(),
)
.map_err(|e| {
SubsystemError::with_origin("network-bridge", e)
})
.map(|_| ())
.boxed();
SpawnedSubsystem {
name: "network-bridge-subsystem",
future,
}
}
}
@@ -654,7 +664,7 @@ where
match peer_map.entry(peer.clone()) {
hash_map::Entry::Occupied(_) => continue,
hash_map::Entry::Vacant(vacant) => {
vacant.insert(PeerData {
let _ = vacant.insert(PeerData {
view: View(Vec::new()),
});
@@ -937,7 +947,7 @@ mod tests {
futures::pin_mut!(test_fut);
futures::pin_mut!(network_bridge);
executor::block_on(future::select(test_fut, network_bridge));
let _ = executor::block_on(future::select(test_fut, network_bridge));
}
async fn assert_sends_validation_event_to_all(
@@ -192,7 +192,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
Err(e) if e.is_disconnected() => {
// the request is already revoked
for peer_id in validator_ids {
on_revoke(&mut self.requested_validators, peer_id);
let _ = on_revoke(&mut self.requested_validators, peer_id);
}
return (network_service, authority_discovery_service);
}
@@ -217,7 +217,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
// They are going to be removed soon though:
// https://github.com/paritytech/substrate/issues/6845
for addr in addresses.into_iter().take(MAX_ADDR_PER_PEER) {
multiaddr_to_add.insert(addr);
let _ = multiaddr_to_add.insert(addr);
}
}
}
@@ -247,7 +247,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
let result = authority_discovery_service.get_addresses_by_authority_id(id).await;
if let Some(addresses) = result {
for addr in addresses.into_iter().take(MAX_ADDR_PER_PEER) {
multiaddr_to_remove.insert(addr);
let _ = multiaddr_to_remove.insert(addr);
}
}
}
@@ -283,16 +283,16 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
let maybe_authority = authority_discovery_service.get_authority_id_by_peer_id(peer_id.clone()).await;
if let Some(authority) = maybe_authority {
for request in self.non_revoked_discovery_requests.iter_mut() {
request.on_authority_connected(&authority, peer_id);
let _ = request.on_authority_connected(&authority, peer_id);
}
self.connected_validators.insert(authority, peer_id.clone());
let _ = self.connected_validators.insert(authority, peer_id.clone());
}
}
pub async fn on_peer_disconnected(&mut self, peer_id: &PeerId, authority_discovery_service: &mut AD) {
let maybe_authority = authority_discovery_service.get_authority_id_by_peer_id(peer_id.clone()).await;
if let Some(authority) = maybe_authority {
self.connected_validators.remove(&authority);
let _ = self.connected_validators.remove(&authority);
}
}
}
@@ -7,12 +7,10 @@ edition = "2018"
[dependencies]
futures = "0.3.5"
log = "0.4.11"
derive_more = "0.99.9"
thiserror = "1.0.21"
codec = { package="parity-scale-codec", version = "1.3.4", features = ["std"] }
polkadot-primitives = { path = "../../../primitives" }
polkadot-network-bridge = { path = "../../network/bridge" }
polkadot-node-network-protocol = { path = "../../network/protocol" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
@@ -20,7 +18,6 @@ polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsys
[dev-dependencies]
env_logger = "0.7.1"
assert_matches = "1.3.0"
smol-timeout = "0.1.0"
smallvec = "1.4.2"
futures-timer = "3.0.2"
@@ -17,11 +17,12 @@
//! The Collator Protocol allows collators and validators talk to each other.
//! This subsystem implements both sides of the collator protocol.
#![deny(missing_docs)]
#![deny(missing_docs, unused_crate_dependencies)]
use std::time::Duration;
use futures::{channel::oneshot, FutureExt};
use log::trace;
use thiserror::Error;
use polkadot_subsystem::{
Subsystem, SubsystemContext, SubsystemError, SpawnedSubsystem,
@@ -45,18 +46,18 @@ mod validator_side;
const TARGET: &'static str = "colp";
const REQUEST_TIMEOUT: Duration = Duration::from_secs(1);
#[derive(Debug, derive_more::From)]
#[derive(Debug, Error)]
enum Error {
#[from]
Subsystem(SubsystemError),
#[from]
Oneshot(oneshot::Canceled),
#[from]
RuntimeApi(RuntimeApiError),
#[from]
UtilError(util::Error),
#[from]
Prometheus(prometheus::PrometheusError),
#[error(transparent)]
Subsystem(#[from] SubsystemError),
#[error(transparent)]
Oneshot(#[from] oneshot::Canceled),
#[error(transparent)]
RuntimeApi(#[from] RuntimeApiError),
#[error(transparent)]
UtilError(#[from] util::Error),
#[error(transparent)]
Prometheus(#[from] prometheus::PrometheusError),
}
impl From<util::validator_discovery::Error> for Error {
@@ -113,7 +114,9 @@ impl CollatorProtocolSubsystem {
id,
metrics,
).await,
}
}.map_err(|e| {
SubsystemError::with_origin("collator-protocol", e).into()
})
}
}
@@ -6,19 +6,13 @@ edition = "2018"
[dependencies]
futures = "0.3.5"
log = "0.4.8"
futures-timer = "3.0.2"
streamunordered = "0.5.1"
log = "0.4.11"
polkadot-primitives = { path = "../../../primitives" }
node-primitives = { package = "polkadot-node-primitives", path = "../../primitives" }
parity-scale-codec = "1.3.4"
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
polkadot-node-network-protocol = { path = "../../network/protocol" }
[dev-dependencies]
parking_lot = "0.10.0"
assert_matches = "1.3.0"
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
+11 -917
View File
@@ -19,9 +19,13 @@
//! This is a gossip implementation of code that is responsible for distributing PoVs
//! among validators.
#![deny(unused_crate_dependencies)]
#![warn(missing_docs)]
use polkadot_primitives::v1::{Hash, PoV, CandidateDescriptor};
use polkadot_subsystem::{
ActiveLeavesUpdate, OverseerSignal, SubsystemContext, Subsystem, SubsystemResult, FromOverseer, SpawnedSubsystem,
ActiveLeavesUpdate, OverseerSignal, SubsystemContext, Subsystem, SubsystemResult, SubsystemError,
FromOverseer, SpawnedSubsystem,
messages::{
PoVDistributionMessage, RuntimeApiMessage, RuntimeApiRequest, AllMessages, NetworkBridgeMessage,
},
@@ -60,9 +64,13 @@ impl<C> Subsystem<C> for PoVDistribution
fn start(self, ctx: C) -> SpawnedSubsystem {
// Swallow error because failure is fatal to the node and we log with more precision
// within `run`.
let future = self.run(ctx)
.map_err(|e| SubsystemError::with_origin("pov-distribution", e))
.map(|_| ())
.boxed();
SpawnedSubsystem {
name: "pov-distribution-subsystem",
future: self.run(ctx).map(|_| ()).boxed(),
future,
}
}
}
@@ -608,918 +616,4 @@ impl metrics::Metrics for Metrics {
}
#[cfg(test)]
mod tests {
use super::*;
use futures::executor;
use polkadot_primitives::v1::BlockData;
use assert_matches::assert_matches;
fn make_pov(data: Vec<u8>) -> PoV {
PoV { block_data: BlockData(data) }
}
fn make_peer_state(awaited: Vec<(Hash, Vec<Hash>)>)
-> PeerState
{
PeerState {
awaited: awaited.into_iter().map(|(rp, h)| (rp, h.into_iter().collect())).collect()
}
}
#[test]
fn distributes_to_those_awaiting_and_completes_local() {
let hash_a: Hash = [0; 32].into();
let hash_b: Hash = [1; 32].into();
let peer_a = PeerId::random();
let peer_b = PeerId::random();
let peer_c = PeerId::random();
let (pov_send, pov_recv) = oneshot::channel();
let pov = make_pov(vec![1, 2, 3]);
let pov_hash = pov.hash();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let mut b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
};
b.fetching.insert(pov_hash, vec![pov_send]);
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
// peer A has hash_a in its view and is awaiting the PoV.
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![pov_hash])]),
);
// peer B has hash_a in its view but is not awaiting.
s.insert(
peer_b.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
// peer C doesn't have hash_a in its view but is awaiting the PoV under hash_b.
s.insert(
peer_c.clone(),
make_peer_state(vec![(hash_b, vec![pov_hash])]),
);
s
},
our_view: View(vec![hash_a, hash_b]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
let mut descriptor = CandidateDescriptor::default();
descriptor.pov_hash = pov_hash;
executor::block_on(async move {
handle_distribute(
&mut state,
&mut ctx,
hash_a,
descriptor,
Arc::new(pov.clone()),
).await.unwrap();
assert!(!state.peer_state[&peer_a].awaited[&hash_a].contains(&pov_hash));
assert!(state.peer_state[&peer_c].awaited[&hash_b].contains(&pov_hash));
// our local sender also completed
assert_eq!(&*pov_recv.await.unwrap(), &pov);
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::SendValidationMessage(peers, message)
) => {
assert_eq!(peers, vec![peer_a.clone()]);
assert_eq!(
message,
send_pov_message(hash_a, pov_hash, pov.clone()),
);
}
)
});
}
#[test]
fn we_inform_peers_with_same_view_we_are_awaiting() {
let hash_a: Hash = [0; 32].into();
let hash_b: Hash = [1; 32].into();
let peer_a = PeerId::random();
let peer_b = PeerId::random();
let (pov_send, _) = oneshot::channel();
let pov = make_pov(vec![1, 2, 3]);
let pov_hash = pov.hash();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
};
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
// peer A has hash_a in its view.
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
// peer B doesn't have hash_a in its view.
s.insert(
peer_b.clone(),
make_peer_state(vec![(hash_b, vec![])]),
);
s
},
our_view: View(vec![hash_a]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
let mut descriptor = CandidateDescriptor::default();
descriptor.pov_hash = pov_hash;
executor::block_on(async move {
handle_fetch(
&mut state,
&mut ctx,
hash_a,
descriptor,
pov_send,
).await.unwrap();
assert_eq!(state.relay_parent_state[&hash_a].fetching[&pov_hash].len(), 1);
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::SendValidationMessage(peers, message)
) => {
assert_eq!(peers, vec![peer_a.clone()]);
assert_eq!(
message,
awaiting_message(hash_a, vec![pov_hash]),
);
}
)
});
}
#[test]
fn peer_view_change_leads_to_us_informing() {
let hash_a: Hash = [0; 32].into();
let hash_b: Hash = [1; 32].into();
let peer_a = PeerId::random();
let (pov_a_send, _) = oneshot::channel();
let pov_a = make_pov(vec![1, 2, 3]);
let pov_a_hash = pov_a.hash();
let pov_b = make_pov(vec![4, 5, 6]);
let pov_b_hash = pov_b.hash();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let mut b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
};
// pov_a is still being fetched, whereas the fetch of pov_b has already
// completed, as implied by the empty vector.
b.fetching.insert(pov_a_hash, vec![pov_a_send]);
b.fetching.insert(pov_b_hash, vec![]);
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
// peer A doesn't yet have hash_a in its view.
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_b, vec![])]),
);
s
},
our_view: View(vec![hash_a]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
executor::block_on(async move {
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerViewChange(peer_a.clone(), View(vec![hash_a, hash_b])),
).await.unwrap();
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::SendValidationMessage(peers, message)
) => {
assert_eq!(peers, vec![peer_a.clone()]);
assert_eq!(
message,
awaiting_message(hash_a, vec![pov_a_hash]),
);
}
)
});
}
#[test]
fn peer_complete_fetch_and_is_rewarded() {
let hash_a: Hash = [0; 32].into();
let peer_a = PeerId::random();
let peer_b = PeerId::random();
let (pov_send, pov_recv) = oneshot::channel();
let pov = make_pov(vec![1, 2, 3]);
let pov_hash = pov.hash();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let mut b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
};
// pov is being fetched.
b.fetching.insert(pov_hash, vec![pov_send]);
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
// peers A and B are functionally the same.
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
s.insert(
peer_b.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
s
},
our_view: View(vec![hash_a]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
executor::block_on(async move {
// Peer A answers our request before peer B.
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerMessage(
peer_a.clone(),
send_pov_message(hash_a, pov_hash, pov.clone()),
).focus().unwrap(),
).await.unwrap();
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerMessage(
peer_b.clone(),
send_pov_message(hash_a, pov_hash, pov.clone()),
).focus().unwrap(),
).await.unwrap();
assert_eq!(&*pov_recv.await.unwrap(), &pov);
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::ReportPeer(peer, rep)
) => {
assert_eq!(peer, peer_a);
assert_eq!(rep, BENEFIT_FRESH_POV);
}
);
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::ReportPeer(peer, rep)
) => {
assert_eq!(peer, peer_b);
assert_eq!(rep, BENEFIT_LATE_POV);
}
);
});
}
#[test]
fn peer_punished_for_sending_bad_pov() {
let hash_a: Hash = [0; 32].into();
let peer_a = PeerId::random();
let (pov_send, _) = oneshot::channel();
let pov = make_pov(vec![1, 2, 3]);
let pov_hash = pov.hash();
let bad_pov = make_pov(vec![6, 6, 6]);
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let mut b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
};
// pov is being fetched.
b.fetching.insert(pov_hash, vec![pov_send]);
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
s
},
our_view: View(vec![hash_a]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
executor::block_on(async move {
// Peer A answers our request: right relay parent, awaited hash, wrong PoV.
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerMessage(
peer_a.clone(),
send_pov_message(hash_a, pov_hash, bad_pov.clone()),
).focus().unwrap(),
).await.unwrap();
// didn't complete our sender.
assert_eq!(state.relay_parent_state[&hash_a].fetching[&pov_hash].len(), 1);
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::ReportPeer(peer, rep)
) => {
assert_eq!(peer, peer_a);
assert_eq!(rep, COST_UNEXPECTED_POV);
}
);
});
}
#[test]
fn peer_punished_for_sending_unexpected_pov() {
let hash_a: Hash = [0; 32].into();
let peer_a = PeerId::random();
let pov = make_pov(vec![1, 2, 3]);
let pov_hash = pov.hash();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
};
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
s
},
our_view: View(vec![hash_a]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
executor::block_on(async move {
// Peer A answers our request: right relay parent, awaited hash, wrong PoV.
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerMessage(
peer_a.clone(),
send_pov_message(hash_a, pov_hash, pov.clone()),
).focus().unwrap(),
).await.unwrap();
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::ReportPeer(peer, rep)
) => {
assert_eq!(peer, peer_a);
assert_eq!(rep, COST_UNEXPECTED_POV);
}
);
});
}
#[test]
fn peer_punished_for_sending_pov_out_of_our_view() {
let hash_a: Hash = [0; 32].into();
let hash_b: Hash = [1; 32].into();
let peer_a = PeerId::random();
let pov = make_pov(vec![1, 2, 3]);
let pov_hash = pov.hash();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
};
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
s
},
our_view: View(vec![hash_a]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
executor::block_on(async move {
// Peer A answers our request: right relay parent, awaited hash, wrong PoV.
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerMessage(
peer_a.clone(),
send_pov_message(hash_b, pov_hash, pov.clone()),
).focus().unwrap(),
).await.unwrap();
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::ReportPeer(peer, rep)
) => {
assert_eq!(peer, peer_a);
assert_eq!(rep, COST_UNEXPECTED_POV);
}
);
});
}
#[test]
fn peer_reported_for_awaiting_too_much() {
let hash_a: Hash = [0; 32].into();
let peer_a = PeerId::random();
let n_validators = 10;
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators,
};
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
s
},
our_view: View(vec![hash_a]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
executor::block_on(async move {
let max_plausibly_awaited = n_validators * 2;
// The peer awaits a plausible (albeit unlikely) amount of PoVs.
for i in 0..max_plausibly_awaited {
let pov_hash = make_pov(vec![i as u8; 32]).hash();
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerMessage(
peer_a.clone(),
awaiting_message(hash_a, vec![pov_hash]),
).focus().unwrap(),
).await.unwrap();
}
assert_eq!(state.peer_state[&peer_a].awaited[&hash_a].len(), max_plausibly_awaited);
// The last straw:
let last_pov_hash = make_pov(vec![max_plausibly_awaited as u8; 32]).hash();
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerMessage(
peer_a.clone(),
awaiting_message(hash_a, vec![last_pov_hash]),
).focus().unwrap(),
).await.unwrap();
// No more bookkeeping for you!
assert_eq!(state.peer_state[&peer_a].awaited[&hash_a].len(), max_plausibly_awaited);
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::ReportPeer(peer, rep)
) => {
assert_eq!(peer, peer_a);
assert_eq!(rep, COST_APPARENT_FLOOD);
}
);
});
}
#[test]
fn peer_reported_for_awaiting_outside_their_view() {
let hash_a: Hash = [0; 32].into();
let hash_b: Hash = [1; 32].into();
let peer_a = PeerId::random();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
s.insert(hash_a, BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
});
s.insert(hash_b, BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
});
s
},
peer_state: {
let mut s = HashMap::new();
// Peer has only hash A in its view.
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
s
},
our_view: View(vec![hash_a, hash_b]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
executor::block_on(async move {
let pov_hash = make_pov(vec![1, 2, 3]).hash();
// Hash B is in our view but not the peer's
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerMessage(
peer_a.clone(),
awaiting_message(hash_b, vec![pov_hash]),
).focus().unwrap(),
).await.unwrap();
assert!(state.peer_state[&peer_a].awaited.get(&hash_b).is_none());
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::ReportPeer(peer, rep)
) => {
assert_eq!(peer, peer_a);
assert_eq!(rep, COST_AWAITED_NOT_IN_VIEW);
}
);
});
}
#[test]
fn peer_reported_for_awaiting_outside_our_view() {
let hash_a: Hash = [0; 32].into();
let hash_b: Hash = [1; 32].into();
let peer_a = PeerId::random();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
s.insert(hash_a, BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
});
s
},
peer_state: {
let mut s = HashMap::new();
// Peer has hashes A and B in their view.
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![]), (hash_b, vec![])]),
);
s
},
our_view: View(vec![hash_a]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
executor::block_on(async move {
let pov_hash = make_pov(vec![1, 2, 3]).hash();
// Hash B is in peer's view but not ours.
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerMessage(
peer_a.clone(),
awaiting_message(hash_b, vec![pov_hash]),
).focus().unwrap(),
).await.unwrap();
// Illegal `awaited` is ignored.
assert!(state.peer_state[&peer_a].awaited[&hash_b].is_empty());
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::ReportPeer(peer, rep)
) => {
assert_eq!(peer, peer_a);
assert_eq!(rep, COST_AWAITED_NOT_IN_VIEW);
}
);
});
}
#[test]
fn peer_complete_fetch_leads_to_us_completing_others() {
let hash_a: Hash = [0; 32].into();
let peer_a = PeerId::random();
let peer_b = PeerId::random();
let (pov_send, pov_recv) = oneshot::channel();
let pov = make_pov(vec![1, 2, 3]);
let pov_hash = pov.hash();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let mut b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
};
// pov is being fetched.
b.fetching.insert(pov_hash, vec![pov_send]);
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
// peer B is awaiting peer A's request.
s.insert(
peer_b.clone(),
make_peer_state(vec![(hash_a, vec![pov_hash])]),
);
s
},
our_view: View(vec![hash_a]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
executor::block_on(async move {
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerMessage(
peer_a.clone(),
send_pov_message(hash_a, pov_hash, pov.clone()),
).focus().unwrap(),
).await.unwrap();
assert_eq!(&*pov_recv.await.unwrap(), &pov);
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::ReportPeer(peer, rep)
) => {
assert_eq!(peer, peer_a);
assert_eq!(rep, BENEFIT_FRESH_POV);
}
);
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::SendValidationMessage(peers, message)
) => {
assert_eq!(peers, vec![peer_b.clone()]);
assert_eq!(
message,
send_pov_message(hash_a, pov_hash, pov.clone()),
);
}
);
assert!(!state.peer_state[&peer_b].awaited[&hash_a].contains(&pov_hash));
});
}
#[test]
fn peer_completing_request_no_longer_awaiting() {
let hash_a: Hash = [0; 32].into();
let peer_a = PeerId::random();
let (pov_send, pov_recv) = oneshot::channel();
let pov = make_pov(vec![1, 2, 3]);
let pov_hash = pov.hash();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let mut b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
};
// pov is being fetched.
b.fetching.insert(pov_hash, vec![pov_send]);
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
// peer A is registered as awaiting.
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![pov_hash])]),
);
s
},
our_view: View(vec![hash_a]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
executor::block_on(async move {
handle_network_update(
&mut state,
&mut ctx,
NetworkBridgeEvent::PeerMessage(
peer_a.clone(),
send_pov_message(hash_a, pov_hash, pov.clone()),
).focus().unwrap(),
).await.unwrap();
assert_eq!(&*pov_recv.await.unwrap(), &pov);
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::ReportPeer(peer, rep)
) => {
assert_eq!(peer, peer_a);
assert_eq!(rep, BENEFIT_FRESH_POV);
}
);
// We received the PoV from peer A, so we do not consider it awaited by peer A anymore.
assert!(!state.peer_state[&peer_a].awaited[&hash_a].contains(&pov_hash));
});
}
}
mod tests;
@@ -0,0 +1,913 @@
use super::*;
use futures::executor;
use polkadot_primitives::v1::BlockData;
use assert_matches::assert_matches;
fn make_pov(data: Vec<u8>) -> PoV {
PoV { block_data: BlockData(data) }
}
fn make_peer_state(awaited: Vec<(Hash, Vec<Hash>)>)
-> PeerState
{
PeerState {
awaited: awaited.into_iter().map(|(rp, h)| (rp, h.into_iter().collect())).collect()
}
}
#[test]
fn distributes_to_those_awaiting_and_completes_local() {
let hash_a: Hash = [0; 32].into();
let hash_b: Hash = [1; 32].into();
let peer_a = PeerId::random();
let peer_b = PeerId::random();
let peer_c = PeerId::random();
let (pov_send, pov_recv) = oneshot::channel();
let pov = make_pov(vec![1, 2, 3]);
let pov_hash = pov.hash();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let mut b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
};
b.fetching.insert(pov_hash, vec![pov_send]);
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
// peer A has hash_a in its view and is awaiting the PoV.
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![pov_hash])]),
);
// peer B has hash_a in its view but is not awaiting.
s.insert(
peer_b.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
// peer C doesn't have hash_a in its view but is awaiting the PoV under hash_b.
s.insert(
peer_c.clone(),
make_peer_state(vec![(hash_b, vec![pov_hash])]),
);
s
},
our_view: View(vec![hash_a, hash_b]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
let mut descriptor = CandidateDescriptor::default();
descriptor.pov_hash = pov_hash;
executor::block_on(async move {
handle_distribute(
&mut state,
&mut ctx,
hash_a,
descriptor,
Arc::new(pov.clone()),
).await.unwrap();
assert!(!state.peer_state[&peer_a].awaited[&hash_a].contains(&pov_hash));
assert!(state.peer_state[&peer_c].awaited[&hash_b].contains(&pov_hash));
// our local sender also completed
assert_eq!(&*pov_recv.await.unwrap(), &pov);
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::SendValidationMessage(peers, message)
) => {
assert_eq!(peers, vec![peer_a.clone()]);
assert_eq!(
message,
send_pov_message(hash_a, pov_hash, pov.clone()),
);
}
)
});
}
#[test]
fn we_inform_peers_with_same_view_we_are_awaiting() {
let hash_a: Hash = [0; 32].into();
let hash_b: Hash = [1; 32].into();
let peer_a = PeerId::random();
let peer_b = PeerId::random();
let (pov_send, _) = oneshot::channel();
let pov = make_pov(vec![1, 2, 3]);
let pov_hash = pov.hash();
let mut state = State {
relay_parent_state: {
let mut s = HashMap::new();
let b = BlockBasedState {
known: HashMap::new(),
fetching: HashMap::new(),
n_validators: 10,
};
s.insert(hash_a, b);
s
},
peer_state: {
let mut s = HashMap::new();
// peer A has hash_a in its view.
s.insert(
peer_a.clone(),
make_peer_state(vec![(hash_a, vec![])]),
);
// peer B doesn't have hash_a in its view.
s.insert(
peer_b.clone(),
make_peer_state(vec![(hash_b, vec![])]),
);
s
},
our_view: View(vec![hash_a]),
metrics: Default::default(),
};
let pool = sp_core::testing::TaskExecutor::new();
let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
let mut descriptor = CandidateDescriptor::default();
descriptor.pov_hash = pov_hash;
executor::block_on(async move {
handle_fetch(
&mut state,
&mut ctx,
hash_a,
descriptor,
pov_send,
).await.unwrap();
assert_eq!(state.relay_parent_state[&hash_a].fetching[&pov_hash].len(), 1);
assert_matches!(
handle.recv().await,
AllMessages::NetworkBridge(
NetworkBridgeMessage::SendValidationMessage(peers, message)
) => {
assert_eq!(peers, vec![peer_a.clone()]);
assert_eq!(
message,
awaiting_message(hash_a, vec![pov_hash]),
);
}
)
});
}
#[test]
fn peer_view_change_leads_to_us_informing() {
	let hash_a: Hash = [0; 32].into();
	let hash_b: Hash = [1; 32].into();
	let peer_a = PeerId::random();
	let (pov_a_send, _) = oneshot::channel();
	let pov_a = make_pov(vec![1, 2, 3]);
	let pov_a_hash = pov_a.hash();
	let pov_b = make_pov(vec![4, 5, 6]);
	let pov_b_hash = pov_b.hash();

	// Block-local state at hash_a: pov_a's fetch is still in flight, while
	// pov_b's fetch already completed (signalled by the empty sender list).
	let mut block_state = BlockBasedState {
		known: HashMap::new(),
		fetching: HashMap::new(),
		n_validators: 10,
	};
	block_state.fetching.insert(pov_a_hash, vec![pov_a_send]);
	block_state.fetching.insert(pov_b_hash, vec![]);

	let mut relay_parent_state = HashMap::new();
	relay_parent_state.insert(hash_a, block_state);

	// Peer A starts out without hash_a in its view.
	let mut peer_state = HashMap::new();
	peer_state.insert(peer_a.clone(), make_peer_state(vec![(hash_b, vec![])]));

	let mut state = State {
		relay_parent_state,
		peer_state,
		our_view: View(vec![hash_a]),
		metrics: Default::default(),
	};

	let pool = sp_core::testing::TaskExecutor::new();
	let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);

	executor::block_on(async move {
		// Peer A's view now gains hash_a; we should tell it which PoVs we still
		// await there (only pov_a — pov_b's fetch is done).
		handle_network_update(
			&mut state,
			&mut ctx,
			NetworkBridgeEvent::PeerViewChange(peer_a.clone(), View(vec![hash_a, hash_b])),
		).await.unwrap();

		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::SendValidationMessage(peers, message)
			) => {
				assert_eq!(peers, vec![peer_a.clone()]);
				assert_eq!(
					message,
					awaiting_message(hash_a, vec![pov_a_hash]),
				);
			}
		)
	});
}
#[test]
fn peer_complete_fetch_and_is_rewarded() {
	let hash_a: Hash = [0; 32].into();
	let peer_a = PeerId::random();
	let peer_b = PeerId::random();
	let (pov_send, pov_recv) = oneshot::channel();
	let pov = make_pov(vec![1, 2, 3]);
	let pov_hash = pov.hash();

	// The PoV is currently being fetched at hash_a.
	let mut block_state = BlockBasedState {
		known: HashMap::new(),
		fetching: HashMap::new(),
		n_validators: 10,
	};
	block_state.fetching.insert(pov_hash, vec![pov_send]);

	let mut relay_parent_state = HashMap::new();
	relay_parent_state.insert(hash_a, block_state);

	// Peers A and B are set up identically.
	let mut peer_state = HashMap::new();
	for peer in &[&peer_a, &peer_b] {
		peer_state.insert((*peer).clone(), make_peer_state(vec![(hash_a, vec![])]));
	}

	let mut state = State {
		relay_parent_state,
		peer_state,
		our_view: View(vec![hash_a]),
		metrics: Default::default(),
	};

	let pool = sp_core::testing::TaskExecutor::new();
	let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);

	executor::block_on(async move {
		// Peer A delivers the PoV first, peer B second.
		for peer in &[&peer_a, &peer_b] {
			handle_network_update(
				&mut state,
				&mut ctx,
				NetworkBridgeEvent::PeerMessage(
					(*peer).clone(),
					send_pov_message(hash_a, pov_hash, pov.clone()),
				).focus().unwrap(),
			).await.unwrap();
		}

		// The pending fetch was completed with the delivered PoV.
		assert_eq!(&*pov_recv.await.unwrap(), &pov);

		// The first responder earns the "fresh" benefit...
		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::ReportPeer(peer, rep)
			) => {
				assert_eq!(peer, peer_a);
				assert_eq!(rep, BENEFIT_FRESH_POV);
			}
		);

		// ...while the late responder only gets the "late" benefit.
		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::ReportPeer(peer, rep)
			) => {
				assert_eq!(peer, peer_b);
				assert_eq!(rep, BENEFIT_LATE_POV);
			}
		);
	});
}
#[test]
fn peer_punished_for_sending_bad_pov() {
	let hash_a: Hash = [0; 32].into();
	let peer_a = PeerId::random();
	let (pov_send, _) = oneshot::channel();
	let pov = make_pov(vec![1, 2, 3]);
	let pov_hash = pov.hash();
	let bad_pov = make_pov(vec![6, 6, 6]);

	// We are awaiting the PoV identified by `pov_hash`.
	let mut block_state = BlockBasedState {
		known: HashMap::new(),
		fetching: HashMap::new(),
		n_validators: 10,
	};
	block_state.fetching.insert(pov_hash, vec![pov_send]);

	let mut relay_parent_state = HashMap::new();
	relay_parent_state.insert(hash_a, block_state);

	let mut peer_state = HashMap::new();
	peer_state.insert(peer_a.clone(), make_peer_state(vec![(hash_a, vec![])]));

	let mut state = State {
		relay_parent_state,
		peer_state,
		our_view: View(vec![hash_a]),
		metrics: Default::default(),
	};

	let pool = sp_core::testing::TaskExecutor::new();
	let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);

	executor::block_on(async move {
		// Peer A answers with the right relay parent and an awaited hash,
		// but the PoV body does not actually hash to `pov_hash`.
		handle_network_update(
			&mut state,
			&mut ctx,
			NetworkBridgeEvent::PeerMessage(
				peer_a.clone(),
				send_pov_message(hash_a, pov_hash, bad_pov.clone()),
			).focus().unwrap(),
		).await.unwrap();

		// Our sender was not completed: the fetch is still pending.
		assert_eq!(state.relay_parent_state[&hash_a].fetching[&pov_hash].len(), 1);

		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::ReportPeer(peer, rep)
			) => {
				assert_eq!(peer, peer_a);
				assert_eq!(rep, COST_UNEXPECTED_POV);
			}
		);
	});
}
#[test]
fn peer_punished_for_sending_unexpected_pov() {
	let hash_a: Hash = [0; 32].into();
	let peer_a = PeerId::random();
	let pov = make_pov(vec![1, 2, 3]);
	let pov_hash = pov.hash();
	let mut state = State {
		relay_parent_state: {
			let mut s = HashMap::new();
			// Nothing is in `fetching`: no PoV is awaited at this relay parent.
			let b = BlockBasedState {
				known: HashMap::new(),
				fetching: HashMap::new(),
				n_validators: 10,
			};
			s.insert(hash_a, b);
			s
		},
		peer_state: {
			let mut s = HashMap::new();
			s.insert(
				peer_a.clone(),
				make_peer_state(vec![(hash_a, vec![])]),
			);
			s
		},
		our_view: View(vec![hash_a]),
		metrics: Default::default(),
	};
	let pool = sp_core::testing::TaskExecutor::new();
	let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
	executor::block_on(async move {
		// Peer A sends a PoV for the right relay parent, but one we never
		// requested (it is absent from the `fetching` set), so it is unexpected.
		handle_network_update(
			&mut state,
			&mut ctx,
			NetworkBridgeEvent::PeerMessage(
				peer_a.clone(),
				send_pov_message(hash_a, pov_hash, pov.clone()),
			).focus().unwrap(),
		).await.unwrap();
		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::ReportPeer(peer, rep)
			) => {
				assert_eq!(peer, peer_a);
				assert_eq!(rep, COST_UNEXPECTED_POV);
			}
		);
	});
}
#[test]
fn peer_punished_for_sending_pov_out_of_our_view() {
	let hash_a: Hash = [0; 32].into();
	let hash_b: Hash = [1; 32].into();
	let peer_a = PeerId::random();
	let pov = make_pov(vec![1, 2, 3]);
	let pov_hash = pov.hash();
	let mut state = State {
		relay_parent_state: {
			// We only track relay parent hash_a; hash_b is outside our view.
			let mut s = HashMap::new();
			let b = BlockBasedState {
				known: HashMap::new(),
				fetching: HashMap::new(),
				n_validators: 10,
			};
			s.insert(hash_a, b);
			s
		},
		peer_state: {
			let mut s = HashMap::new();
			s.insert(
				peer_a.clone(),
				make_peer_state(vec![(hash_a, vec![])]),
			);
			s
		},
		our_view: View(vec![hash_a]),
		metrics: Default::default(),
	};
	let pool = sp_core::testing::TaskExecutor::new();
	let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
	executor::block_on(async move {
		// Peer A sends a PoV anchored at hash_b — a relay parent that is not in
		// our view (we only track hash_a), so the message must be punished.
		handle_network_update(
			&mut state,
			&mut ctx,
			NetworkBridgeEvent::PeerMessage(
				peer_a.clone(),
				send_pov_message(hash_b, pov_hash, pov.clone()),
			).focus().unwrap(),
		).await.unwrap();
		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::ReportPeer(peer, rep)
			) => {
				assert_eq!(peer, peer_a);
				assert_eq!(rep, COST_UNEXPECTED_POV);
			}
		);
	});
}
#[test]
fn peer_reported_for_awaiting_too_much() {
	let hash_a: Hash = [0; 32].into();
	let peer_a = PeerId::random();
	let n_validators = 10;

	let mut relay_parent_state = HashMap::new();
	relay_parent_state.insert(hash_a, BlockBasedState {
		known: HashMap::new(),
		fetching: HashMap::new(),
		n_validators,
	});

	let mut peer_state = HashMap::new();
	peer_state.insert(peer_a.clone(), make_peer_state(vec![(hash_a, vec![])]));

	let mut state = State {
		relay_parent_state,
		peer_state,
		our_view: View(vec![hash_a]),
		metrics: Default::default(),
	};

	let pool = sp_core::testing::TaskExecutor::new();
	let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);

	executor::block_on(async move {
		let max_plausibly_awaited = n_validators * 2;

		// Fill the peer's awaited set right up to the plausibility limit;
		// every one of these requests is accepted and tracked.
		for index in 0..max_plausibly_awaited {
			let pov_hash = make_pov(vec![index as u8; 32]).hash();
			handle_network_update(
				&mut state,
				&mut ctx,
				NetworkBridgeEvent::PeerMessage(
					peer_a.clone(),
					awaiting_message(hash_a, vec![pov_hash]),
				).focus().unwrap(),
			).await.unwrap();
		}

		assert_eq!(state.peer_state[&peer_a].awaited[&hash_a].len(), max_plausibly_awaited);

		// One request past the limit: this one must be dropped.
		let last_pov_hash = make_pov(vec![max_plausibly_awaited as u8; 32]).hash();
		handle_network_update(
			&mut state,
			&mut ctx,
			NetworkBridgeEvent::PeerMessage(
				peer_a.clone(),
				awaiting_message(hash_a, vec![last_pov_hash]),
			).focus().unwrap(),
		).await.unwrap();

		// The awaited set did not grow, and the peer is reported for flooding.
		assert_eq!(state.peer_state[&peer_a].awaited[&hash_a].len(), max_plausibly_awaited);
		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::ReportPeer(peer, rep)
			) => {
				assert_eq!(peer, peer_a);
				assert_eq!(rep, COST_APPARENT_FLOOD);
			}
		);
	});
}
#[test]
fn peer_reported_for_awaiting_outside_their_view() {
	let hash_a: Hash = [0; 32].into();
	let hash_b: Hash = [1; 32].into();
	let peer_a = PeerId::random();

	// We track both relay parents...
	let mut relay_parent_state = HashMap::new();
	for relay_parent in &[hash_a, hash_b] {
		relay_parent_state.insert(*relay_parent, BlockBasedState {
			known: HashMap::new(),
			fetching: HashMap::new(),
			n_validators: 10,
		});
	}

	// ...but the peer's view contains only hash_a.
	let mut peer_state = HashMap::new();
	peer_state.insert(peer_a.clone(), make_peer_state(vec![(hash_a, vec![])]));

	let mut state = State {
		relay_parent_state,
		peer_state,
		our_view: View(vec![hash_a, hash_b]),
		metrics: Default::default(),
	};

	let pool = sp_core::testing::TaskExecutor::new();
	let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);

	executor::block_on(async move {
		let pov_hash = make_pov(vec![1, 2, 3]).hash();

		// The peer requests a PoV at hash_b — in our view, but not in its own.
		handle_network_update(
			&mut state,
			&mut ctx,
			NetworkBridgeEvent::PeerMessage(
				peer_a.clone(),
				awaiting_message(hash_b, vec![pov_hash]),
			).focus().unwrap(),
		).await.unwrap();

		// No bookkeeping is created for the illegal request, and the peer is reported.
		assert!(state.peer_state[&peer_a].awaited.get(&hash_b).is_none());
		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::ReportPeer(peer, rep)
			) => {
				assert_eq!(peer, peer_a);
				assert_eq!(rep, COST_AWAITED_NOT_IN_VIEW);
			}
		);
	});
}
#[test]
fn peer_reported_for_awaiting_outside_our_view() {
	let hash_a: Hash = [0; 32].into();
	let hash_b: Hash = [1; 32].into();
	let peer_a = PeerId::random();

	// We track only relay parent hash_a...
	let mut relay_parent_state = HashMap::new();
	relay_parent_state.insert(hash_a, BlockBasedState {
		known: HashMap::new(),
		fetching: HashMap::new(),
		n_validators: 10,
	});

	// ...while the peer's view contains both hash_a and hash_b.
	let mut peer_state = HashMap::new();
	peer_state.insert(
		peer_a.clone(),
		make_peer_state(vec![(hash_a, vec![]), (hash_b, vec![])]),
	);

	let mut state = State {
		relay_parent_state,
		peer_state,
		our_view: View(vec![hash_a]),
		metrics: Default::default(),
	};

	let pool = sp_core::testing::TaskExecutor::new();
	let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);

	executor::block_on(async move {
		let pov_hash = make_pov(vec![1, 2, 3]).hash();

		// The peer requests a PoV at hash_b — in its view, but not in ours.
		handle_network_update(
			&mut state,
			&mut ctx,
			NetworkBridgeEvent::PeerMessage(
				peer_a.clone(),
				awaiting_message(hash_b, vec![pov_hash]),
			).focus().unwrap(),
		).await.unwrap();

		// The illegal `awaited` entry is ignored, and the peer is reported.
		assert!(state.peer_state[&peer_a].awaited[&hash_b].is_empty());
		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::ReportPeer(peer, rep)
			) => {
				assert_eq!(peer, peer_a);
				assert_eq!(rep, COST_AWAITED_NOT_IN_VIEW);
			}
		);
	});
}
#[test]
fn peer_complete_fetch_leads_to_us_completing_others() {
	let hash_a: Hash = [0; 32].into();
	let peer_a = PeerId::random();
	let peer_b = PeerId::random();
	let (pov_send, pov_recv) = oneshot::channel();
	let pov = make_pov(vec![1, 2, 3]);
	let pov_hash = pov.hash();

	// The PoV is currently being fetched at hash_a.
	let mut block_state = BlockBasedState {
		known: HashMap::new(),
		fetching: HashMap::new(),
		n_validators: 10,
	};
	block_state.fetching.insert(pov_hash, vec![pov_send]);

	let mut relay_parent_state = HashMap::new();
	relay_parent_state.insert(hash_a, block_state);

	let mut peer_state = HashMap::new();
	peer_state.insert(peer_a.clone(), make_peer_state(vec![(hash_a, vec![])]));
	// Peer B has told us it awaits exactly this PoV.
	peer_state.insert(peer_b.clone(), make_peer_state(vec![(hash_a, vec![pov_hash])]));

	let mut state = State {
		relay_parent_state,
		peer_state,
		our_view: View(vec![hash_a]),
		metrics: Default::default(),
	};

	let pool = sp_core::testing::TaskExecutor::new();
	let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);

	executor::block_on(async move {
		// Peer A delivers the PoV we were fetching.
		handle_network_update(
			&mut state,
			&mut ctx,
			NetworkBridgeEvent::PeerMessage(
				peer_a.clone(),
				send_pov_message(hash_a, pov_hash, pov.clone()),
			).focus().unwrap(),
		).await.unwrap();

		// Our own pending fetch completes.
		assert_eq!(&*pov_recv.await.unwrap(), &pov);

		// Peer A is rewarded for the fresh PoV.
		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::ReportPeer(peer, rep)
			) => {
				assert_eq!(peer, peer_a);
				assert_eq!(rep, BENEFIT_FRESH_POV);
			}
		);

		// We forward the PoV to peer B, which was awaiting it.
		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::SendValidationMessage(peers, message)
			) => {
				assert_eq!(peers, vec![peer_b.clone()]);
				assert_eq!(
					message,
					send_pov_message(hash_a, pov_hash, pov.clone()),
				);
			}
		);

		// Peer B is no longer marked as awaiting this PoV.
		assert!(!state.peer_state[&peer_b].awaited[&hash_a].contains(&pov_hash));
	});
}
#[test]
fn peer_completing_request_no_longer_awaiting() {
	let hash_a: Hash = [0; 32].into();
	let peer_a = PeerId::random();
	let (pov_send, pov_recv) = oneshot::channel();
	let pov = make_pov(vec![1, 2, 3]);
	let pov_hash = pov.hash();

	// The PoV is currently being fetched at hash_a.
	let mut block_state = BlockBasedState {
		known: HashMap::new(),
		fetching: HashMap::new(),
		n_validators: 10,
	};
	block_state.fetching.insert(pov_hash, vec![pov_send]);

	let mut relay_parent_state = HashMap::new();
	relay_parent_state.insert(hash_a, block_state);

	// Peer A is itself registered as awaiting this PoV.
	let mut peer_state = HashMap::new();
	peer_state.insert(peer_a.clone(), make_peer_state(vec![(hash_a, vec![pov_hash])]));

	let mut state = State {
		relay_parent_state,
		peer_state,
		our_view: View(vec![hash_a]),
		metrics: Default::default(),
	};

	let pool = sp_core::testing::TaskExecutor::new();
	let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);

	executor::block_on(async move {
		// Peer A delivers the PoV it was itself marked as awaiting.
		handle_network_update(
			&mut state,
			&mut ctx,
			NetworkBridgeEvent::PeerMessage(
				peer_a.clone(),
				send_pov_message(hash_a, pov_hash, pov.clone()),
			).focus().unwrap(),
		).await.unwrap();

		// Our pending fetch completes and peer A is rewarded.
		assert_eq!(&*pov_recv.await.unwrap(), &pov);
		assert_matches!(
			handle.recv().await,
			AllMessages::NetworkBridge(
				NetworkBridgeMessage::ReportPeer(peer, rep)
			) => {
				assert_eq!(peer, peer_a);
				assert_eq!(rep, BENEFIT_FRESH_POV);
			}
		);

		// Having received the PoV from peer A, we drop it from peer A's awaited set.
		assert!(!state.peer_state[&peer_a].awaited[&hash_a].contains(&pov_hash));
	});
}
@@ -9,6 +9,4 @@ description = "Primitives types for the Node-side"
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
parity-scale-codec = { version = "1.3.4", default-features = false, features = ["derive"] }
runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
+13
View File
@@ -16,9 +16,13 @@
//! Network protocol types for parachains.
#![deny(unused_crate_dependencies, unused_results)]
#![warn(missing_docs)]
use polkadot_primitives::v1::Hash;
use parity_scale_codec::{Encode, Decode};
use std::convert::TryFrom;
use std::fmt;
pub use sc_network::{ReputationChange, PeerId};
@@ -32,6 +36,15 @@ pub type ProtocolVersion = u32;
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct WrongVariant;
impl fmt::Display for WrongVariant {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "Wrong message variant")
}
}
impl std::error::Error for WrongVariant {}
/// The peer-sets that the network manages. Different subsystems will use different peer-sets.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PeerSet {
@@ -7,13 +7,9 @@ edition = "2018"
[dependencies]
futures = "0.3.5"
log = "0.4.8"
futures-timer = "3.0.2"
streamunordered = "0.5.1"
log = "0.4.11"
polkadot-primitives = { path = "../../../primitives" }
node-primitives = { package = "polkadot-node-primitives", path = "../../primitives" }
parity-scale-codec = "1.3.4"
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
@@ -22,7 +18,6 @@ arrayvec = "0.5.1"
indexmap = "1.4.0"
[dev-dependencies]
parking_lot = "0.10.0"
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
assert_matches = "1.3.0"
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
@@ -19,6 +19,9 @@
//! This is responsible for distributing signed statements about candidate
//! validity amongst validators.
#![deny(unused_crate_dependencies)]
#![warn(missing_docs)]
use polkadot_subsystem::{
Subsystem, SubsystemResult, SubsystemContext, SpawnedSubsystem,
ActiveLeavesUpdate, FromOverseer, OverseerSignal,
+4 -4
View File
@@ -6,7 +6,7 @@ edition = "2018"
[dependencies]
futures = "0.3.5"
log = "0.4.8"
log = "0.4.11"
futures-timer = "3.0.2"
streamunordered = "0.5.1"
polkadot-primitives = { path = "../../primitives" }
@@ -21,6 +21,6 @@ sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-node-network-protocol = { path = "../network/protocol" }
futures = { version = "0.3.5", features = ["thread-pool"] }
futures-timer = "3.0.2"
femme = "2.0.1"
log = "0.4.8"
kv-log-macro = "1.0.6"
femme = "2.1.1"
log = "0.4.11"
kv-log-macro = "1.0.7"
+24 -16
View File
@@ -54,6 +54,11 @@
//! ..................................................................
//! ```
// #![deny(unused_results)]
// unused dependencies can not work for test and examples at the same time
// yielding false positives
#![warn(missing_docs)]
use std::fmt::Debug;
use std::pin::Pin;
use std::sync::Arc;
@@ -96,7 +101,6 @@ const STOP_DELAY: u64 = 1;
// Target for logs.
const LOG_TARGET: &'static str = "overseer";
/// A type of messages that are sent from [`Subsystem`] to [`Overseer`].
///
/// It wraps a system-wide [`AllMessages`] type that represents all possible
@@ -160,7 +164,7 @@ impl From<FinalityNotification<Block>> for BlockInfo {
}
}
/// Some event from outer world.
/// Some event from the outer world.
enum Event {
BlockImported(BlockInfo),
BlockFinalized(BlockInfo),
@@ -173,7 +177,7 @@ enum Event {
enum ExternalRequest {
WaitForActivation {
hash: Hash,
response_channel: oneshot::Sender<()>,
response_channel: oneshot::Sender<SubsystemResult<()>>,
},
}
@@ -208,7 +212,7 @@ impl OverseerHandler {
/// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas,
/// the response channel may never return if the hash was deactivated before this call.
/// In this case, it's the caller's responsibility to ensure a timeout is set.
pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender<()>) -> SubsystemResult<()> {
pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender<SubsystemResult<()>>) -> SubsystemResult<()> {
self.events_tx.send(Event::ExternalRequest(ExternalRequest::WaitForActivation {
hash,
response_channel
@@ -303,7 +307,11 @@ impl<M: Send + 'static> SubsystemContext for OverseerSubsystemContext<M> {
}
async fn recv(&mut self) -> SubsystemResult<FromOverseer<M>> {
self.rx.next().await.ok_or(SubsystemError)
self.rx.next().await
.ok_or(SubsystemError::Context(
"No more messages in rx queue to process"
.to_owned()
))
}
async fn spawn(&mut self, name: &'static str, s: Pin<Box<dyn Future<Output = ()> + Send>>)
@@ -398,7 +406,7 @@ pub struct Overseer<S> {
s: S,
/// Here we keep handles to spawned subsystems to be notified when they terminate.
running_subsystems: FuturesUnordered<BoxFuture<'static, ()>>,
running_subsystems: FuturesUnordered<BoxFuture<'static, SubsystemResult<()>>>,
/// Gather running subsystms' outbound streams into one.
running_subsystems_rx: StreamUnordered<mpsc::Receiver<ToOverseer>>,
@@ -407,7 +415,7 @@ pub struct Overseer<S> {
events_rx: mpsc::Receiver<Event>,
/// External listeners waiting for a hash to be in the active-leave set.
activation_external_listeners: HashMap<Hash, Vec<oneshot::Sender<()>>>,
activation_external_listeners: HashMap<Hash, Vec<oneshot::Sender<SubsystemResult<()>>>>,
/// A set of leaves that `Overseer` starts working with.
///
@@ -1267,7 +1275,7 @@ where
loop {
select! {
_ = self.running_subsystems.next() => {
x = self.running_subsystems.next() => {
if self.running_subsystems.is_empty() {
break;
}
@@ -1285,7 +1293,7 @@ where
for (hash, number) in leaves.into_iter() {
update.activated.push(hash);
self.active_leaves.insert(hash, number);
let _ = self.active_leaves.insert(hash, number);
self.on_head_activated(&hash);
}
@@ -1331,7 +1339,7 @@ where
if let Poll::Ready(Some(finished)) = poll!(self.running_subsystems.next()) {
log::error!(target: LOG_TARGET, "Subsystem finished unexpectedly {:?}", finished);
self.stop().await;
return Err(SubsystemError);
return finished;
}
// Looks like nothing is left to be polled, let's take a break.
@@ -1353,7 +1361,7 @@ where
match self.active_leaves.entry(block.hash) {
hash_map::Entry::Vacant(entry) => {
update.activated.push(block.hash);
entry.insert(block.number);
let _ = entry.insert(block.number);
self.on_head_activated(&block.hash);
},
hash_map::Entry::Occupied(entry) => {
@@ -1541,7 +1549,7 @@ where
if let Some(listeners) = self.activation_external_listeners.remove(hash) {
for listener in listeners {
// it's fine if the listener is no longer interested
let _ = listener.send(());
let _ = listener.send(Ok(()));
}
}
}
@@ -1567,7 +1575,7 @@ where
ExternalRequest::WaitForActivation { hash, response_channel } => {
if self.active_leaves.get(&hash).is_some() {
// it's fine if the listener is no longer interested
let _ = response_channel.send(());
let _ = response_channel.send(Ok(()));
} else {
self.activation_external_listeners.entry(hash).or_default().push(response_channel);
}
@@ -1586,7 +1594,7 @@ where
fn spawn<S: SpawnNamed, M: Send + 'static>(
spawner: &mut S,
futures: &mut FuturesUnordered<BoxFuture<'static, ()>>,
futures: &mut FuturesUnordered<BoxFuture<'static, SubsystemResult<()>>>,
streams: &mut StreamUnordered<mpsc::Receiver<ToOverseer>>,
s: impl Subsystem<OverseerSubsystemContext<M>>,
) -> SubsystemResult<OverseenSubsystem<M>> {
@@ -1604,8 +1612,8 @@ fn spawn<S: SpawnNamed, M: Send + 'static>(
spawner.spawn(name, fut);
streams.push(from_rx);
futures.push(Box::pin(rx.map(|_| ())));
let _ = streams.push(from_rx);
futures.push(Box::pin(rx.map(|e| { log::warn!("Dropping error {:?}", e); Ok(()) })));
let instance = Some(SubsystemInstance {
tx: to_tx,
+2
View File
@@ -20,6 +20,8 @@
//! not shared between the node and the runtime. This crate builds on top of the primitives defined
//! there.
#![deny(missing_docs)]
use futures::Future;
use parity_scale_codec::{Decode, Encode};
use polkadot_primitives::v1::{
+5 -3
View File
@@ -16,12 +16,14 @@
//! Polkadot service. Specialized wrapper over substrate service.
#![deny(unused_results)]
pub mod chain_spec;
mod grandpa_support;
mod client;
use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider};
#[cfg(feature = "full-node")]
use log::info;
use polkadot_node_core_proposer::ProposerFactory;
use polkadot_overseer::{AllSubsystems, BlockInfo, Overseer, OverseerHandler};
@@ -383,7 +385,7 @@ pub fn new_full<RuntimeApi, Executor>(
})?;
if config.offchain_worker.enabled {
service::build_offchain_workers(
let _ = service::build_offchain_workers(
&config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(),
);
}
@@ -650,7 +652,7 @@ fn new_light<Runtime, Dispatch>(mut config: Configuration) -> Result<(TaskManage
})?;
if config.offchain_worker.enabled {
service::build_offchain_workers(
let _ = service::build_offchain_workers(
&config,
backend.clone(),
task_manager.spawn_handle(),
@@ -7,7 +7,6 @@ description = "Subsystem traits and message definitions"
[dependencies]
async-trait = "0.1"
derive_more = "0.99.9"
futures = "0.3.5"
futures-timer = "3.0.2"
log = "0.4.8"
@@ -16,6 +16,8 @@
//! Utilities for testing subsystems.
#![warn(missing_docs)]
use polkadot_node_subsystem::messages::AllMessages;
use polkadot_node_subsystem::{
FromOverseer, SubsystemContext, SubsystemError, SubsystemResult, Subsystem,
@@ -169,7 +171,8 @@ impl<M: Send + 'static, S: SpawnNamed + Send + 'static> SubsystemContext
}
async fn recv(&mut self) -> SubsystemResult<FromOverseer<M>> {
self.rx.next().await.ok_or(SubsystemError)
self.rx.next().await
.ok_or_else(|| SubsystemError::Context("Receiving end closed".to_owned()))
}
async fn spawn(
+2 -4
View File
@@ -7,20 +7,18 @@ description = "Subsystem traits and message definitions"
[dependencies]
async-trait = "0.1"
derive_more = "0.99.9"
futures = "0.3.5"
futures-timer = "3.0.2"
log = "0.4.8"
log = "0.4.11"
thiserror = "1.0.21"
parity-scale-codec = "1.3.4"
parking_lot = { version = "0.10.0", optional = true }
pin-project = "0.4.22"
smallvec = "1.4.1"
streamunordered = "0.5.1"
polkadot-node-primitives = { path = "../primitives" }
polkadot-node-subsystem = { path = "../subsystem" }
polkadot-primitives = { path = "../../primitives" }
polkadot-statement-table = { path = "../../statement-table" }
sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+53 -34
View File
@@ -22,6 +22,10 @@
//!
//! This crate also reexports Prometheus metric types which are expected to be implemented by subsystems.
#![deny(unused_results)]
// #![deny(unused_crate_dependencies] causes false positives
// https://github.com/rust-lang/rust/issues/57274
#![warn(missing_docs)]
use polkadot_node_subsystem::{
errors::{ChainApiError, RuntimeApiError},
@@ -63,6 +67,7 @@ use std::{
time::Duration,
};
use streamunordered::{StreamUnordered, StreamYield};
use thiserror::Error;
pub mod validator_discovery;
@@ -82,35 +87,38 @@ pub const JOB_GRACEFUL_STOP_DURATION: Duration = Duration::from_secs(1);
/// Capacity of channels to and from individual jobs
pub const JOB_CHANNEL_CAPACITY: usize = 64;
/// Utility errors
#[derive(Debug, derive_more::From)]
#[derive(Debug, Error)]
pub enum Error {
/// Attempted to send or receive on a oneshot channel which had been canceled
#[from]
Oneshot(oneshot::Canceled),
#[error(transparent)]
Oneshot(#[from] oneshot::Canceled),
/// Attempted to send on a MPSC channel which has been canceled
#[from]
Mpsc(mpsc::SendError),
#[error(transparent)]
Mpsc(#[from] mpsc::SendError),
/// A subsystem error
#[from]
Subsystem(SubsystemError),
#[error(transparent)]
Subsystem(#[from] SubsystemError),
/// An error in the Chain API.
#[from]
ChainApi(ChainApiError),
#[error(transparent)]
ChainApi(#[from] ChainApiError),
/// An error in the Runtime API.
#[from]
RuntimeApi(RuntimeApiError),
#[error(transparent)]
RuntimeApi(#[from] RuntimeApiError),
/// The type system wants this even though it doesn't make sense
#[from]
Infallible(std::convert::Infallible),
#[error(transparent)]
Infallible(#[from] std::convert::Infallible),
/// Attempted to convert from an AllMessages to a FromJob, and failed.
#[error("AllMessage not relevant to Job")]
SenderConversion(String),
/// The local node is not a validator.
#[error("Node is not a validator")]
NotAValidator,
/// The desired job is not present in the jobs list.
#[error("Relay parent {0} not of interest")]
JobNotFound(Hash),
/// Already forwarding errors to another sender
#[error("AlreadyForwarding")]
AlreadyForwarding,
}
@@ -496,7 +504,7 @@ pub trait JobTrait: Unpin {
/// Message type from the job. Typically a subset of AllMessages.
type FromJob: 'static + Into<AllMessages> + Send;
/// Job runtime error.
type Error: 'static + std::fmt::Debug + Send;
type Error: 'static + std::error::Error + Send;
/// Extra arguments this job needs to run properly.
///
/// If no extra information is needed, it is perfectly acceptable to set it to `()`.
@@ -538,13 +546,14 @@ pub trait JobTrait: Unpin {
/// Error which can be returned by the jobs manager
///
/// Wraps the utility error type and the job-specific error
#[derive(Debug, derive_more::From)]
pub enum JobsError<JobError> {
#[derive(Debug, Error)]
pub enum JobsError<JobError: 'static + std::error::Error> {
/// utility error
#[from]
Utility(Error),
#[error("Utility")]
Utility(#[source] Error),
/// internal job error
Job(JobError),
#[error("Internal")]
Job(#[source] JobError),
}
/// Jobs manager for a subsystem
@@ -645,7 +654,7 @@ impl<Spawner: SpawnNamed, Job: 'static + JobTrait> Jobs<Spawner, Job> {
outgoing_msgs_handle,
};
self.running.insert(parent_hash, handle);
let _ = self.running.insert(parent_hash, handle);
Ok(())
}
@@ -654,7 +663,7 @@ impl<Spawner: SpawnNamed, Job: 'static + JobTrait> Jobs<Spawner, Job> {
pub async fn stop_job(&mut self, parent_hash: Hash) -> Result<(), Error> {
match self.running.remove(&parent_hash) {
Some(handle) => {
Pin::new(&mut self.outgoing_msgs).remove(handle.outgoing_msgs_handle);
let _ = Pin::new(&mut self.outgoing_msgs).remove(handle.outgoing_msgs_handle);
handle.stop().await;
Ok(())
}
@@ -830,7 +839,8 @@ where
let metrics = metrics.clone();
if let Err(e) = jobs.spawn_job(hash, run_args.clone(), metrics) {
log::error!("Failed to spawn a job: {:?}", e);
Self::fwd_err(Some(hash), e.into(), err_tx).await;
let e = JobsError::Utility(e);
Self::fwd_err(Some(hash), e, err_tx).await;
return true;
}
}
@@ -838,7 +848,8 @@ where
for hash in deactivated {
if let Err(e) = jobs.stop_job(hash).await {
log::error!("Failed to stop a job: {:?}", e);
Self::fwd_err(Some(hash), e.into(), err_tx).await;
let e = JobsError::Utility(e);
Self::fwd_err(Some(hash), e, err_tx).await;
return true;
}
}
@@ -863,7 +874,8 @@ where
.await
{
log::error!("failed to stop all jobs on conclude signal: {:?}", e);
Self::fwd_err(None, Error::from(e).into(), err_tx).await;
let e = Error::from(e);
Self::fwd_err(None, JobsError::Utility(e), err_tx).await;
}
return true;
@@ -874,14 +886,16 @@ where
Some(hash) => {
if let Err(err) = jobs.send_msg(hash, to_job).await {
log::error!("Failed to send a message to a job: {:?}", err);
Self::fwd_err(Some(hash), err.into(), err_tx).await;
let e = JobsError::Utility(err);
Self::fwd_err(Some(hash), e, err_tx).await;
return true;
}
}
None => {
if let Err(err) = Job::handle_unanchored_msg(to_job) {
log::error!("Failed to handle unhashed message: {:?}", err);
Self::fwd_err(None, JobsError::Job(err), err_tx).await;
let e = JobsError::Job(err);
Self::fwd_err(None, e, err_tx).await;
return true;
}
}
@@ -891,7 +905,8 @@ where
Ok(Signal(BlockFinalized(_))) => {}
Err(err) => {
log::error!("error receiving message from subsystem context: {:?}", err);
Self::fwd_err(None, Error::from(err).into(), err_tx).await;
let e = JobsError::Utility(Error::from(err));
Self::fwd_err(None, e, err_tx).await;
return true;
}
}
@@ -906,7 +921,8 @@ where
) {
let msg = outgoing.expect("the Jobs stream never ends; qed");
if let Err(e) = ctx.send_message(msg.into()).await {
Self::fwd_err(None, Error::from(e).into(), err_tx).await;
let e = JobsError::Utility(e.into());
Self::fwd_err(None, e, err_tx).await;
}
}
}
@@ -1032,6 +1048,8 @@ pub struct Timeout<F: Future> {
/// Extends `Future` to allow time-limited futures.
pub trait TimeoutExt: Future {
/// Adds a timeout of `duration` to the given `Future`.
/// Returns a new `Future`.
fn timeout(self, duration: Duration) -> Timeout<Self>
where
Self: Sized,
@@ -1066,6 +1084,7 @@ impl<F: Future> Future for Timeout<F> {
#[cfg(test)]
mod tests {
use super::{Error as UtilError, JobManager, JobTrait, JobsError, TimeoutExt, ToJobTrait};
use thiserror::Error;
use polkadot_node_subsystem::{
messages::{AllMessages, CandidateSelectionMessage},
ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem,
@@ -1156,10 +1175,10 @@ mod tests {
// Error will mostly be a wrapper to make the try operator more convenient;
// deriving From implementations for most variants is recommended.
// It must implement Debug for logging.
#[derive(Debug, derive_more::From)]
#[derive(Debug, Error)]
enum Error {
#[from]
Sending(mpsc::SendError),
#[error(transparent)]
Sending(#[from]mpsc::SendError),
}
impl JobTrait for FakeCandidateSelectionJob {
@@ -1261,7 +1280,7 @@ mod tests {
fn starting_and_stopping_job_works() {
let relay_parent: Hash = [0; 32].into();
let mut run_args = HashMap::new();
run_args.insert(
let _ = run_args.insert(
relay_parent.clone(),
vec![FromJob::Test],
);
@@ -1317,7 +1336,7 @@ mod tests {
fn sending_to_a_non_running_job_do_not_stop_the_subsystem() {
let relay_parent = Hash::repeat_byte(0x01);
let mut run_args = HashMap::new();
run_args.insert(
let _ = run_args.insert(
relay_parent.clone(),
vec![FromJob::Test],
);
@@ -24,6 +24,7 @@ use futures::{
task::{Poll, self},
stream,
};
use thiserror::Error;
use polkadot_node_subsystem::{
errors::RuntimeApiError, SubsystemError,
@@ -34,17 +35,17 @@ use polkadot_primitives::v1::{Hash, ValidatorId, AuthorityDiscoveryId};
use sc_network::PeerId;
/// Error when making a request to connect to validators.
#[derive(Debug, derive_more::From)]
#[derive(Debug, Error)]
pub enum Error {
/// Attempted to send or receive on a oneshot channel which had been canceled
#[from]
Oneshot(oneshot::Canceled),
#[error(transparent)]
Oneshot(#[from] oneshot::Canceled),
/// A subsystem error.
#[from]
Subsystem(SubsystemError),
#[error(transparent)]
Subsystem(#[from] SubsystemError),
/// An error in the Runtime API.
#[from]
RuntimeApi(RuntimeApiError),
#[error(transparent)]
RuntimeApi(#[from] RuntimeApiError),
}
/// Utility function to make it easier to connect to validators.
+1
View File
@@ -21,6 +21,7 @@ polkadot-statement-table = { path = "../../statement-table" }
sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
smallvec = "1.4.1"
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
thiserror = "1.0.21"
[dev-dependencies]
assert_matches = "1.3.0"
+4
View File
@@ -32,6 +32,8 @@ impl core::fmt::Display for RuntimeApiError {
}
}
impl std::error::Error for RuntimeApiError {}
/// A description of an error causing the chain API request to be unservable.
#[derive(Debug, Clone)]
pub struct ChainApiError {
@@ -55,3 +57,5 @@ impl core::fmt::Display for ChainApiError {
write!(f, "{}", self.msg)
}
}
impl std::error::Error for ChainApiError {}
+35 -21
View File
@@ -31,6 +31,7 @@ use futures::future::BoxFuture;
use polkadot_primitives::v1::Hash;
use async_trait::async_trait;
use smallvec::SmallVec;
use thiserror::Error;
use crate::messages::AllMessages;
@@ -105,6 +106,7 @@ pub enum FromOverseer<M> {
},
}
/// An error type that describes faults that may happen
///
/// These are:
@@ -112,30 +114,42 @@ pub enum FromOverseer<M> {
/// * Subsystems dying when they are not expected to
/// * Subsystems not dying when they are told to die
/// * etc.
#[derive(Debug, PartialEq, Eq)]
pub struct SubsystemError;
#[derive(Error, Debug)]
pub enum SubsystemError {
/// A notification connection is no longer valid.
#[error(transparent)]
NotifyCancellation(#[from] oneshot::Canceled),
impl From<mpsc::SendError> for SubsystemError {
fn from(_: mpsc::SendError) -> Self {
Self
}
/// Queue does not accept another item.
#[error(transparent)]
QueueError(#[from] mpsc::SendError),
/// An attempt to spawn a futures task did not succeed.
#[error(transparent)]
TaskSpawn(#[from] futures::task::SpawnError),
/// An infallable error.
#[error(transparent)]
Infallible(#[from] std::convert::Infallible),
/// An other error lacking particular type information.
#[error("Failed to {0}")]
Context(String),
/// Per origin (or subsystem) annotations to wrap an error.
#[error("Error originated in {origin}")]
FromOrigin {
/// An additional anotation tag for the origin of `source`.
origin: &'static str,
/// The wrapped error. Marked as source for tracking the error chain.
#[source] source: Box<dyn std::error::Error + Send>
},
}
impl From<oneshot::Canceled> for SubsystemError {
fn from(_: oneshot::Canceled) -> Self {
Self
}
}
impl From<futures::task::SpawnError> for SubsystemError {
fn from(_: futures::task::SpawnError) -> Self {
Self
}
}
impl From<std::convert::Infallible> for SubsystemError {
fn from(e: std::convert::Infallible) -> Self {
match e {}
impl SubsystemError {
/// Adds a `str` as `origin` to the given error `err`.
pub fn with_origin<E: 'static + Send + std::error::Error>(origin: &'static str, err: E) -> Self {
Self::FromOrigin { origin, source: Box::new(err) }
}
}
+3 -2
View File
@@ -23,7 +23,7 @@
//! Subsystems' APIs are defined separately from their implementation, leading to easier mocking.
use futures::channel::{mpsc, oneshot};
use thiserror::Error;
use polkadot_node_network_protocol::{
v1 as protocol_v1, NetworkBridgeEvent, ReputationChange, PeerId,
};
@@ -97,7 +97,8 @@ impl CandidateBackingMessage {
}
/// Blanket error for validation failing for internal reasons.
#[derive(Debug)]
#[derive(Debug, Error)]
#[error("Validation failed with {0:?}")]
pub struct ValidationFailed(pub String);
/// Messages received by the Validation subsystem.
+1 -1
View File
@@ -17,7 +17,7 @@ sp-wasm-interface = { git = "https://github.com/paritytech/substrate", branch =
polkadot-core-primitives = { path = "../core-primitives", default-features = false }
# all optional crates.
derive_more = { version = "0.99.2", optional = true }
derive_more = { version = "0.99.11", optional = true }
serde = { version = "1.0.102", default-features = false, features = [ "derive" ], optional = true }
sp-externalities = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
+1 -1
View File
@@ -15,7 +15,6 @@ consensus = { package = "sp-consensus", git = "https://github.com/paritytech/sub
runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "master" }
futures = "0.3.4"
log = "0.4.8"
derive_more = "0.14.1"
codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" }
inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master" }
@@ -26,6 +25,7 @@ block-builder = { package = "sc-block-builder", git = "https://github.com/parity
trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "master" }
babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" }
prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "master" }
thiserror = "1.0.21"
[dev-dependencies]
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
+12 -11
View File
@@ -16,24 +16,25 @@
//! Errors that can occur during the validation process.
use thiserror::Error;
/// Error type for validation
#[derive(Debug, derive_more::Display, derive_more::From)]
#[derive(Debug, Error)]
pub enum Error {
/// Client error
Client(sp_blockchain::Error),
#[error(transparent)]
Client(#[from] sp_blockchain::Error),
/// Consensus error
Consensus(consensus::error::Error),
#[error(transparent)]
Consensus(#[from] consensus::error::Error),
/// Unexpected error checking inherents
#[display(fmt = "Unexpected error while checking inherents: {}", _0)]
#[error("Unexpected error while checking inherents: {0}")]
InherentError(inherents::Error),
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Error::Client(ref err) => Some(err),
Error::Consensus(ref err) => Some(err),
_ => None,
}
impl std::convert::From<inherents::Error> for Error {
fn from(inner: inherents::Error) -> Self {
Self::InherentError(inner)
}
}