Merge branch 'master' into hc-fix-bridges-subtree

This commit is contained in:
Hernando Castano
2021-04-22 15:02:29 -04:00
10 changed files with 294 additions and 217 deletions
+51 -83
View File
@@ -109,23 +109,39 @@ test-deterministic-wasm:
script:
- ./scripts/gitlab/test_deterministic_wasm.sh
test-linux-stable:
test-build-linux-stable:
stage: test
<<: *rules-test
<<: *docker-env
<<: *compiler-info
<<: *collect-artifacts
variables:
RUST_TOOLCHAIN: stable
# Enable debug assertions since we are running optimized builds for testing
# but still want to have debug assertions.
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
TARGET: native
artifacts:
paths:
- ./target/release/polkadot
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
- if: $CI_COMMIT_REF_NAME == "rococo-v1"
script:
- ./scripts/gitlab/test_linux_stable.sh
# we're using the bin built here, instead of having a parallel `build-linux-release`
- time cargo build --release --verbose --bin polkadot
- sccache -s
# pack-artifacts
- mkdir -p ./artifacts
- VERSION="${CI_COMMIT_REF_NAME}" # will be tag or branch name
- mv ./target/release/polkadot ./artifacts/.
- sha256sum ./artifacts/polkadot | tee ./artifacts/polkadot.sha256
- EXTRATAG="$(./artifacts/polkadot --version |
sed -n -r 's/^polkadot ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p')"
- EXTRATAG="${CI_COMMIT_REF_NAME}-${EXTRATAG}-$(cut -c 1-8 ./artifacts/polkadot.sha256)"
- echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})"
- echo -n ${VERSION} > ./artifacts/VERSION
- echo -n ${EXTRATAG} > ./artifacts/EXTRATAG
- cp -r scripts/docker/* ./artifacts
check-web-wasm:
stage: test
@@ -148,43 +164,6 @@ check-runtime-benchmarks:
- ./scripts/gitlab/check_runtime_benchmarks.sh
- sccache -s
.pack-artifacts: &pack-artifacts
- mkdir -p ./artifacts
- VERSION="${CI_COMMIT_REF_NAME}" # will be tag or branch name
- mv ./target/release/polkadot ./artifacts/.
- sha256sum ./artifacts/polkadot | tee ./artifacts/polkadot.sha256
- if [ "${CI_COMMIT_TAG}" ]; then
EXTRATAG="latest";
else
EXTRATAG="$(./artifacts/polkadot --version |
sed -n -r 's/^polkadot ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p')";
EXTRATAG="${CI_COMMIT_REF_NAME}-${EXTRATAG}-$(cut -c 1-8 ./artifacts/polkadot.sha256)";
fi
- echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})"
- echo -n ${VERSION} > ./artifacts/VERSION
- echo -n ${EXTRATAG} > ./artifacts/EXTRATAG
- cp -r scripts/docker/* ./artifacts
build-linux-release:
stage: test
<<: *collect-artifacts
<<: *docker-env
<<: *compiler-info
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
variables:
RUSTFLAGS: "-Cdebug-assertions=y"
- if: $CI_COMMIT_REF_NAME == "rococo-v1"
variables:
RUSTFLAGS: "-Cdebug-assertions=y"
script:
- time cargo build --release --verbose
- sccache -s
- *pack-artifacts
build-adder-collator:
stage: test
<<: *collect-artifacts
@@ -213,7 +192,7 @@ check-transaction-versions:
<<: *rules-test
<<: *docker-env
needs:
- job: test-linux-stable
- job: test-build-linux-stable
artifacts: true
before_script:
- apt-get -y update; apt-get -y install jq lsof
@@ -235,19 +214,16 @@ generate-impl-guide:
.build-push-image: &build-push-image
<<: *kubernetes-env
image: quay.io/buildah/stable
variables:
variables: &image-variables
GIT_STRATEGY: none
# scripts/docker/Dockerfile
DOCKERFILE: Dockerfile
IMAGE_NAME: docker.io/parity/polkadot
DOCKER_USER: ${Docker_Hub_User_Parity}
DOCKER_PASS: ${Docker_Hub_Pass_Parity}
DOCKER_USER: ${PARITYPR_USER}
DOCKER_PASS: ${PARITYPR_PASS}
before_script: &check-versions
- test -s ./artifacts/VERSION || exit 1
- test -s ./artifacts/EXTRATAG || exit 1
- VERSION="$(cat ./artifacts/VERSION)"
- EXTRATAG="$(cat ./artifacts/EXTRATAG)"
- echo "Polkadot version = ${VERSION} (EXTRATAG ${EXTRATAG})"
- echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})"
script:
- test "$DOCKER_USER" -a "$DOCKER_PASS" ||
( echo "no docker credentials provided"; exit 1 )
@@ -280,6 +256,11 @@ generate-impl-guide:
publish-polkadot-image:
stage: build
<<: *build-push-image
variables:
<<: *image-variables
# scripts/docker/Dockerfile
DOCKERFILE: Dockerfile
IMAGE_NAME: docker.io/paritypr/synth-wave
rules:
# Don't run on releases - this is handled by the Github Action here:
# .github/workflows/publish-docker-release.yml
@@ -288,15 +269,14 @@ publish-polkadot-image:
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
variables:
IMAGE_NAME: docker.io/paritypr/synth-wave
DOCKER_USER: ${PARITYPR_USER}
DOCKER_PASS: ${PARITYPR_PASS}
- if: $CI_COMMIT_REF_NAME == "rococo-v1"
variables:
<<: *image-variables
IMAGE_NAME: docker.io/parity/rococo
DOCKER_USER: ${Docker_Hub_User_Parity}
DOCKER_PASS: ${Docker_Hub_Pass_Parity}
needs:
- job: build-linux-release
- job: test-build-linux-stable
artifacts: true
publish-adder-collator-image:
@@ -304,15 +284,13 @@ publish-adder-collator-image:
stage: build
<<: *build-push-image
variables:
<<: *image-variables
# scripts/docker/collator.Dockerfile
DOCKERFILE: collator.Dockerfile
IMAGE_NAME: docker.io/paritypr/colander
DOCKER_USER: ${PARITYPR_USER}
DOCKER_PASS: ${PARITYPR_PASS}
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
# FIXME: remove me after merging
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
needs:
- job: build-adder-collator
@@ -332,51 +310,41 @@ publish-adder-collator-image:
publish-s3-release: &publish-s3
stage: publish
needs:
- job: build-linux-release
- job: test-build-linux-stable
artifacts: true
<<: *kubernetes-env
image: paritytech/awscli:latest
variables:
GIT_STRATEGY: none
BUCKET: "releases.parity.io"
PREFIX: "polkadot/${ARCH}-${DOCKER_OS}"
PREFIX: "builds/polkadot/${ARCH}-${DOCKER_OS}"
rules:
# publishing binaries nightly
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
before_script:
- *check-versions
script:
- echo "uploading objects to https://${BUCKET}/${PREFIX}/${VERSION}"
- aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/${VERSION}/
- echo "update objects at https://${BUCKET}/${PREFIX}/${EXTRATAG}"
- echo "uploading objects to https://releases.parity.io/${PREFIX}/${VERSION}"
- aws s3 sync --acl public-read ./artifacts/ s3://${AWS_BUCKET}/${PREFIX}/${VERSION}/
- echo "update objects at https://releases.parity.io/${PREFIX}/${EXTRATAG}"
- find ./artifacts -type f | while read file; do
name="${file#./artifacts/}";
aws s3api copy-object
--copy-source ${BUCKET}/${PREFIX}/${VERSION}/${name}
--bucket ${BUCKET} --key ${PREFIX}/${EXTRATAG}/${name};
name="${file#./artifacts/}";
aws s3api copy-object
--copy-source ${AWS_BUCKET}/${PREFIX}/${VERSION}/${name}
--bucket ${AWS_BUCKET} --key ${PREFIX}/${EXTRATAG}/${name};
done
- |
cat <<-EOM
|
| polkadot binary paths:
|
| - https://${BUCKET}/${PREFIX}/${EXTRATAG}/polkadot
| - https://${BUCKET}/${PREFIX}/${VERSION}/polkadot
| - https://releases.parity.io/${PREFIX}/${EXTRATAG}/polkadot
| - https://releases.parity.io/${PREFIX}/${VERSION}/polkadot
|
EOM
after_script:
- aws s3 ls s3://${BUCKET}/${PREFIX}/${EXTRATAG}/
- aws s3 ls s3://${AWS_BUCKET}/${PREFIX}/${EXTRATAG}/
--recursive --human-readable --summarize
publish-s3-adder-collator:
<<: *publish-s3
variables:
PREFIX: "rococo/${ARCH}-${DOCKER_OS}"
needs:
- job: build-adder-collator
artifacts: true
#### stage: deploy
deploy-polkasync-kusama:
@@ -404,7 +372,7 @@ trigger-simnet:
needs:
- job: publish-polkadot-image
- job: publish-adder-collator-image
# `build.env` brings here `$IMAGE_NAME` and `$IMAGE_TAG` (`$EXTRATAG` here,
# `build.env` brings here `$IMAGE_NAME` and `$IMAGE_TAG` (`$EXTRATAG` here,
# i.e. `2643-0.8.29-5f689e0a-6b24dc54`).
# `collator.env` bears adder-collator unique build tag. In non-triggered builds it
# can be called by `master` tag.
-2
View File
@@ -11263,7 +11263,6 @@ dependencies = [
"pallet-authorship",
"pallet-babe",
"pallet-balances",
"pallet-beefy",
"pallet-collective",
"pallet-democracy",
"pallet-election-provider-multi-phase",
@@ -11273,7 +11272,6 @@ dependencies = [
"pallet-im-online",
"pallet-indices",
"pallet-membership",
"pallet-mmr",
"pallet-mmr-primitives",
"pallet-multisig",
"pallet-nicks",
+3 -2
View File
@@ -89,6 +89,7 @@ impl SubstrateCli for Cli {
"rococo-staging" => Box::new(service::chain_spec::rococo_staging_testnet_config()?),
"rococo-local" => Box::new(service::chain_spec::rococo_local_testnet_config()?),
"rococo" => Box::new(service::chain_spec::rococo_config()?),
"wococo" => Box::new(service::chain_spec::wococo_config()?),
path => {
let path = std::path::PathBuf::from(path);
@@ -98,7 +99,7 @@ impl SubstrateCli for Cli {
// When `force_*` is given or the file name starts with the name of one of the known chains,
// we use the chain spec for the specific chain.
if self.run.force_rococo || starts_with("rococo") {
if self.run.force_rococo || starts_with("rococo") || starts_with("wococo") {
Box::new(service::RococoChainSpec::from_json_file(path)?)
} else if self.run.force_kusama || starts_with("kusama") {
Box::new(service::KusamaChainSpec::from_json_file(path)?)
@@ -116,7 +117,7 @@ impl SubstrateCli for Cli {
&service::kusama_runtime::VERSION
} else if spec.is_westend() {
&service::westend_runtime::VERSION
} else if spec.is_rococo() {
} else if spec.is_rococo() || spec.is_wococo() {
&service::rococo_runtime::VERSION
} else {
&service::polkadot_runtime::VERSION
File diff suppressed because one or more lines are too long
+8 -20
View File
@@ -112,6 +112,11 @@ pub fn rococo_config() -> Result<PolkadotChainSpec, String> {
PolkadotChainSpec::from_json_bytes(&include_bytes!("../res/rococo.json")[..])
}
/// This is a temporary testnet that uses the same runtime as rococo.
pub fn wococo_config() -> Result<PolkadotChainSpec, String> {
PolkadotChainSpec::from_json_bytes(&include_bytes!("../res/wococo.json")[..])
}
fn polkadot_session_keys(
babe: BabeId,
grandpa: GrandpaId,
@@ -155,7 +160,6 @@ fn westend_session_keys(
para_validator: ValidatorId,
para_assignment: AssignmentId,
authority_discovery: AuthorityDiscoveryId,
beefy: BeefyId,
) -> westend::SessionKeys {
westend::SessionKeys {
babe,
@@ -164,7 +168,6 @@ fn westend_session_keys(
para_validator,
para_assignment,
authority_discovery,
beefy,
}
}
@@ -296,7 +299,6 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Genesi
// for i in 1 2 3 4; do for j in grandpa; do subkey --ed25519 inspect "$SECRET//$i//$j"; done; done
// for i in 1 2 3 4; do for j in im_online; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done
// for i in 1 2 3 4; do for j in para_validator para_assignment; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done
// for i in 1 2 3 4; do for j in beefy; do subkey --ecdsa inspect "$SECRET//$i//$j"; done; done
let initial_authorities: Vec<(
AccountId,
AccountId,
@@ -306,7 +308,6 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Genesi
ValidatorId,
AssignmentId,
AuthorityDiscoveryId,
BeefyId,
)> = vec![
(
//5FZoQhgUCmqBxnkHX7jCqThScS2xQWiwiF61msg63CFL3Y8f
@@ -325,8 +326,6 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Genesi
hex!["0810d2113438bb14856b06383a4f0da4e5cc2f92a3fc18ef03a54b34c6007662"].unchecked_into(),
//5CkAdj1MpkMtQikrGXuzgzrRLvUnfLQH2JsnZa16u4cK2Xhf
hex!["1e18b5a9f872727189934a6988ff2a6732c87b9e31e2d694dd011aff9dfb2332"].unchecked_into(),
//5E6ogZEZyc5YZ3ijWUPB9M6Xtx6E9FabhmP9J4rwcEH6pLGv
hex!["0293be7cdb81f25039dfd01aac905da8a5e50113366bc4b5dc5eb888cf5552b8a9"].unchecked_into(),
),
(
//5G1ojzh47Yt8KoYhuAjXpHcazvsoCXe3G8LZchKDvumozJJJ
@@ -345,8 +344,6 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Genesi
hex!["d6a113804a98728bb2af4f3721ab31a3644731292bffe0268995d8f8fb073b57"].unchecked_into(),
//5DUrcztb1pRz6DfA8Vo8JSUSpoQVr27Yo6gjPmnumhhubLeN
hex!["3ea7a06009d1b9b1d4233dea3e6bb6494b9aeda91edc443629a28afa9fab8c62"].unchecked_into(),
//5HUYyVYXjm5mdEpDiykmhxZGzQjY4LLxyTPD7hfMjfLUy3VT
hex!["020e0ba5e112f0d3356ff8c78a37e2d7f76f90ab8dc9ed2eac98c87c5ffb2b0ebe"].unchecked_into(),
),
(
//5HYYWyhyUQ7Ae11f8fCid58bhJ7ikLHM9bU8A6Ynwoc3dStR
@@ -365,8 +362,6 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Genesi
hex!["0e5e1fb2c0fa7db11cd83fef3493900292badf02f35812ba738efeab9978a46c"].unchecked_into(),
//5GvKehGrFVea8rywSeJhTopmpBDNHSFBoZp32g3CecppYa3V
hex!["d6c8735316211321cd85ccd7c583222ab024393b8c86c7c8d1192a1d4f35bb2e"].unchecked_into(),
//5FpX3V5qCGehdTBRxkpHzGjwK9nvihLYj6gwR4NWn8DjbAoL
hex!["020b4bc2972761bd1abf20d5f83f79ff546ef63094e193d21758566c58dea9642f"].unchecked_into(),
),
(
//5CFPcUJgYgWryPaV1aYjSbTpbTLu42V32Ytw1L9rfoMAsfGh
@@ -385,8 +380,6 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Genesi
hex!["c4390ca0274f0262a4ef7cd4d3aa6cab0875a6efdd40d38c21be4f770b6c4b1a"].unchecked_into(),
//5CZd519gfE3gALMtFWa283VHikXwoGFmT92B3Nu3iN7YGcaR
hex!["160e0049b62d368c59d286275697e8d5e68d34ee8663ac4c3da646b0abb4a86f"].unchecked_into(),
//5Fnu4YYBx9V71ihCBkJyFGsKw9Q2jjNzRQL9kRNpKTPNSAhc
hex!["03e9393ee30ae95fc2b7864230f53e45409a807949390140ce2bc77756cdb4bb83"].unchecked_into(),
),
];
@@ -406,7 +399,6 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Genesi
.chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH)))
.collect(),
},
pallet_beefy: Default::default(),
pallet_indices: westend::IndicesConfig { indices: vec![] },
pallet_session: westend::SessionConfig {
keys: initial_authorities
@@ -422,7 +414,6 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Genesi
x.5.clone(),
x.6.clone(),
x.7.clone(),
x.8.clone(),
),
)
})
@@ -1311,7 +1302,6 @@ pub fn westend_testnet_genesis(
ValidatorId,
AssignmentId,
AuthorityDiscoveryId,
BeefyId,
)>,
root_key: AccountId,
endowed_accounts: Option<Vec<AccountId>>,
@@ -1333,7 +1323,6 @@ pub fn westend_testnet_genesis(
.map(|k| (k.clone(), ENDOWMENT))
.collect(),
},
pallet_beefy: Default::default(),
pallet_session: westend::SessionConfig {
keys: initial_authorities
.iter()
@@ -1348,7 +1337,6 @@ pub fn westend_testnet_genesis(
x.5.clone(),
x.6.clone(),
x.7.clone(),
x.8.clone(),
),
)
})
@@ -1515,7 +1503,7 @@ fn kusama_development_config_genesis(wasm_binary: &[u8]) -> kusama::GenesisConfi
fn westend_development_config_genesis(wasm_binary: &[u8]) -> westend::GenesisConfig {
westend_testnet_genesis(
wasm_binary,
vec![get_authority_keys_from_seed("Alice")],
vec![get_authority_keys_from_seed_no_beefy("Alice")],
get_account_id_from_seed::<sr25519::Public>("Alice"),
None,
)
@@ -1634,8 +1622,8 @@ fn westend_local_testnet_genesis(wasm_binary: &[u8]) -> westend::GenesisConfig {
westend_testnet_genesis(
wasm_binary,
vec![
get_authority_keys_from_seed("Alice"),
get_authority_keys_from_seed("Bob"),
get_authority_keys_from_seed_no_beefy("Alice"),
get_authority_keys_from_seed_no_beefy("Bob"),
],
get_account_id_from_seed::<sr25519::Public>("Alice"),
None,
+18 -15
View File
@@ -151,7 +151,7 @@ pub enum Error {
DatabasePathRequired,
}
/// Can be called for a `Configuration` to check if it is a configuration for the `Kusama` network.
/// Can be called for a `Configuration` to identify which network the configuration targets.
pub trait IdentifyVariant {
/// Returns if this is a configuration for the `Kusama` network.
fn is_kusama(&self) -> bool;
@@ -162,6 +162,9 @@ pub trait IdentifyVariant {
/// Returns if this is a configuration for the `Rococo` network.
fn is_rococo(&self) -> bool;
/// Returns if this is a configuration for the `Wococo` test network.
fn is_wococo(&self) -> bool;
/// Returns true if this configuration is for a development network.
fn is_dev(&self) -> bool;
}
@@ -176,6 +179,9 @@ impl IdentifyVariant for Box<dyn ChainSpec> {
fn is_rococo(&self) -> bool {
self.id().starts_with("rococo") || self.id().starts_with("rco")
}
fn is_wococo(&self) -> bool {
self.id().starts_with("wococo") || self.id().starts_with("wco")
}
fn is_dev(&self) -> bool {
self.id().ends_with("dev")
}
@@ -680,7 +686,7 @@ pub fn new_full<RuntimeApi, Executor>(
let backoff_authoring_blocks = {
let mut backoff = sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default();
if config.chain_spec.is_rococo() {
if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
// it's a testnet that's in flux, finality has stalled sometimes due
// to operational issues and it's annoying to slow down block
// production to 1 block per hour.
@@ -715,7 +721,7 @@ pub fn new_full<RuntimeApi, Executor>(
// Substrate nodes.
config.network.extra_sets.push(grandpa::grandpa_peers_set_config());
if config.chain_spec.is_westend() || config.chain_spec.is_rococo() {
if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
config.network.extra_sets.push(beefy_gadget::beefy_peers_set_config());
}
@@ -921,24 +927,21 @@ pub fn new_full<RuntimeApi, Executor>(
task_manager.spawn_essential_handle().spawn_blocking("babe", babe);
}
// We currently only run the BEEFY gadget on Rococo and Westend test
// networks. On Rococo we start the BEEFY gadget as a normal (non-essential)
// task for now, since BEEFY is still experimental and we don't want a
// failure to bring down the whole node. Westend test network is less used
// than Rococo and therefore a failure there will be less problematic, this
// will be the main testing target for BEEFY for now.
if chain_spec.is_westend() || chain_spec.is_rococo() {
// We currently only run the BEEFY gadget on the Rococo and Wococo testnets.
if chain_spec.is_rococo() || chain_spec.is_wococo() {
let gadget = beefy_gadget::start_beefy_gadget::<_, beefy_primitives::ecdsa::AuthorityPair, _, _, _, _>(
client.clone(),
keystore_container.sync_keystore(),
network.clone(),
beefy_link,
network.clone(),
if chain_spec.is_westend() { 4 } else { 8 },
if chain_spec.is_wococo() { 4 } else { 8 },
prometheus_registry.clone()
);
if chain_spec.is_westend() {
// Wococo's purpose is to be a testbed for BEEFY, so if it fails we'll
// bring the node down with it to make sure it is noticed.
if chain_spec.is_wococo() {
task_manager.spawn_essential_handle().spawn_blocking("beefy-gadget", gadget);
} else {
task_manager.spawn_handle().spawn_blocking("beefy-gadget", gadget);
@@ -1174,7 +1177,7 @@ pub fn new_chain_ops(
>
{
config.keystore = service::config::KeystoreConfig::InMemory;
if config.chain_spec.is_rococo() {
if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
let service::PartialComponents { client, backend, import_queue, task_manager, .. }
= new_partial::<rococo_runtime::RuntimeApi, RococoExecutor>(config, jaeger_agent, None)?;
Ok((Arc::new(Client::Rococo(client)), backend, import_queue, task_manager))
@@ -1198,7 +1201,7 @@ pub fn build_light(config: Configuration) -> Result<(
TaskManager,
RpcHandlers,
), Error> {
if config.chain_spec.is_rococo() {
if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
new_light::<rococo_runtime::RuntimeApi, RococoExecutor>(config)
} else if config.chain_spec.is_kusama() {
new_light::<kusama_runtime::RuntimeApi, KusamaExecutor>(config)
@@ -1217,7 +1220,7 @@ pub fn build_full(
jaeger_agent: Option<std::net::SocketAddr>,
telemetry_worker_handle: Option<TelemetryWorkerHandle>,
) -> Result<NewFull<Client>, Error> {
if config.chain_spec.is_rococo() {
if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
new_full::<rococo_runtime::RuntimeApi, RococoExecutor>(
config,
is_collator,
-5
View File
@@ -35,7 +35,6 @@ sp-npos-elections = { git = "https://github.com/paritytech/substrate", branch =
pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
pallet-beefy = { git = "https://github.com/paritytech/grandpa-bridge-gadget", branch = "master", default-features = false }
pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
@@ -49,7 +48,6 @@ pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "m
pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
pallet-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
pallet-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
@@ -128,8 +126,6 @@ std = [
"pallet-im-online/std",
"pallet-indices/std",
"pallet-membership/std",
"pallet-beefy/std",
"pallet-mmr/std",
"pallet-mmr-primitives/std",
"beefy-primitives/std",
"pallet-multisig/std",
@@ -179,7 +175,6 @@ runtime-benchmarks = [
"pallet-identity/runtime-benchmarks",
"pallet-im-online/runtime-benchmarks",
"pallet-indices/runtime-benchmarks",
"pallet-mmr/runtime-benchmarks",
"pallet-multisig/runtime-benchmarks",
"pallet-proxy/runtime-benchmarks",
"pallet-scheduler/runtime-benchmarks",
+15 -81
View File
@@ -31,7 +31,6 @@ use primitives::v1::{
InboundDownwardMessage, InboundHrmpMessage, SessionInfo,
};
use runtime_common::{
mmr as mmr_common,
SlowAdjustingFeeUpdate, CurrencyToVote,
impls::ToAuthor,
BlockHashCount, BlockWeights, BlockLength, RocksDbWeight,
@@ -43,7 +42,7 @@ use sp_runtime::{
ApplyExtrinsicResult, KeyTypeId, Perbill, curve::PiecewiseLinear,
transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority},
traits::{
Keccak256, BlakeTwo256, Block as BlockT, OpaqueKeys, ConvertInto, AccountIdLookup,
BlakeTwo256, Block as BlockT, OpaqueKeys, ConvertInto, AccountIdLookup,
Extrinsic as ExtrinsicT, SaturatedConversion, Verify,
},
};
@@ -91,7 +90,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
spec_name: create_runtime_str!("westend"),
impl_name: create_runtime_str!("parity-westend"),
authoring_version: 2,
// NOTE: see https://github.com/paritytech/polkadot/wiki/Westend
spec_version: 51,
impl_version: 0,
#[cfg(not(feature = "disable-runtime-api"))]
@@ -269,18 +267,6 @@ parameter_types! {
pub const Offset: BlockNumber = 0;
}
// TODO [ToDr] Remove while BEEFY runtime upgrade is done.
impl_opaque_keys! {
pub struct OldSessionKeys {
pub grandpa: Grandpa,
pub babe: Babe,
pub im_online: ImOnline,
pub para_validator: ParachainSessionKeyPlaceholder<Runtime>,
pub para_assignment: AssignmentSessionKeyPlaceholder<Runtime>,
pub authority_discovery: AuthorityDiscovery,
}
}
impl_opaque_keys! {
pub struct SessionKeys {
pub grandpa: Grandpa,
@@ -289,28 +275,6 @@ impl_opaque_keys! {
pub para_validator: ParachainSessionKeyPlaceholder<Runtime>,
pub para_assignment: AssignmentSessionKeyPlaceholder<Runtime>,
pub authority_discovery: AuthorityDiscovery,
pub beefy: Beefy,
}
}
fn transform_session_keys(v: AccountId, old: OldSessionKeys) -> SessionKeys {
SessionKeys {
grandpa: old.grandpa,
babe: old.babe,
im_online: old.im_online,
para_validator: old.para_validator,
para_assignment: old.para_assignment,
authority_discovery: old.authority_discovery,
beefy: runtime_common::dummy_beefy_id_from_account_id(v),
}
}
// When this is removed, should also remove `OldSessionKeys`.
pub struct UpgradeSessionKeys;
impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys {
fn on_runtime_upgrade() -> frame_support::weights::Weight {
Session::upgrade_keys::<OldSessionKeys, _>(transform_session_keys);
Perbill::from_percent(50) * BlockWeights::get().max_block
}
}
@@ -622,24 +586,6 @@ impl pallet_sudo::Config for Runtime {
type Call = Call;
}
impl pallet_beefy::Config for Runtime {
type AuthorityId = BeefyId;
}
impl pallet_mmr::Config for Runtime {
const INDEXING_PREFIX: &'static [u8] = b"mmr";
type Hashing = Keccak256;
type Hash = <Keccak256 as sp_runtime::traits::Hash>::Output;
type OnNewRoot = mmr_common::DepositBeefyDigest<Runtime>;
type WeightInfo = ();
type LeafData = mmr_common::Pallet<Runtime>;
}
impl mmr_common::Config for Runtime {
type BeefyAuthorityToMerkleLeaf = mmr_common::UncompressBeefyEcdsaKeys;
type ParachainHeads = ();
}
parameter_types! {
// One storage item; key size 32, value size 8; .
pub const ProxyDepositBase: Balance = deposit(1, 8);
@@ -798,11 +744,6 @@ construct_runtime! {
// Election pallet. Only works with staking, but placed here to maintain indices.
ElectionProviderMultiPhase: pallet_election_provider_multi_phase::{Pallet, Call, Storage, Event<T>, ValidateUnsigned} = 24,
// Bridges support.
Mmr: pallet_mmr::{Pallet, Call, Storage} = 28,
Beefy: pallet_beefy::{Pallet, Config<T>, Storage} = 29,
MmrLeaf: mmr_common::{Pallet, Storage} = 30,
}
}
@@ -841,7 +782,7 @@ pub type Executive = frame_executive::Executive<
frame_system::ChainContext<Runtime>,
Runtime,
AllPallets,
UpgradeSessionKeys,
(),
>;
/// The payload being signed in transactions.
pub type SignedPayload = generic::SignedPayload<Call, SignedExtra>;
@@ -976,40 +917,33 @@ sp_api::impl_runtime_apis! {
impl beefy_primitives::BeefyApi<Block, BeefyId> for Runtime {
fn validator_set() -> beefy_primitives::ValidatorSet<BeefyId> {
Beefy::validator_set()
// dummy implementation due to lack of BEEFY pallet.
beefy_primitives::ValidatorSet { validators: Vec::new(), id: 0 }
}
}
impl pallet_mmr_primitives::MmrApi<Block, Hash> for Runtime {
fn generate_proof(leaf_index: u64)
fn generate_proof(_leaf_index: u64)
-> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof<Hash>), mmr::Error>
{
Mmr::generate_proof(leaf_index)
.map(|(leaf, proof)| (mmr::EncodableOpaqueLeaf::from_leaf(&leaf), proof))
// dummy implementation due to lack of MMR pallet.
Err(mmr::Error::GenerateProof)
}
fn verify_proof(leaf: mmr::EncodableOpaqueLeaf, proof: mmr::Proof<Hash>)
fn verify_proof(_leaf: mmr::EncodableOpaqueLeaf, _proof: mmr::Proof<Hash>)
-> Result<(), mmr::Error>
{
pub type Leaf = <
<Runtime as pallet_mmr::Config>::LeafData as mmr::LeafDataProvider
>::LeafData;
let leaf: Leaf = leaf
.into_opaque_leaf()
.try_decode()
.ok_or(mmr::Error::Verify)?;
Mmr::verify_leaf(leaf, proof)
// dummy implementation due to lack of MMR pallet.
Err(mmr::Error::Verify)
}
fn verify_proof_stateless(
root: Hash,
leaf: mmr::EncodableOpaqueLeaf,
proof: mmr::Proof<Hash>
_root: Hash,
_leaf: mmr::EncodableOpaqueLeaf,
_proof: mmr::Proof<Hash>
) -> Result<(), mmr::Error> {
type MmrHashing = <Runtime as pallet_mmr::Config>::Hashing;
let node = mmr::DataOrHash::Data(leaf.into_opaque_leaf());
pallet_mmr::verify_leaf_proof::<MmrHashing, _>(root, node, proof)
// dummy implementation due to lack of MMR pallet.
Err(mmr::Error::Verify)
}
}
+1 -1
View File
@@ -4,4 +4,4 @@ set -e
#shellcheck source=../common/lib.sh
source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh"
time cargo test --all --release --verbose --locked --features=runtime-benchmarks
time cargo test --workspace --release --verbose --locked --features=runtime-benchmarks
+14 -8
View File
@@ -1,7 +1,10 @@
#!/bin/bash
set -eu
# Trigger another project's pipeline via the API
echo "Triggering Simnet pipeline."
curl --silent \
-X POST \
-F "token=${CI_JOB_TOKEN}" \
@@ -12,13 +15,14 @@ curl --silent \
-F "variables[IMAGE_TAG]=${IMAGE_TAG}" \
-F "variables[COLLATOR_IMAGE_TAG]=${COLLATOR_IMAGE_TAG}" \
"https://${CI_SERVER_HOST}/api/v4/projects/${DWNSTRM_ID}/trigger/pipeline" | \
tee pipeline
tee pipeline;
PIPELINE_ID=$(cat pipeline | jq ".id")
echo "\nWaiting on ${PIPELINE_ID} status..."
PIPELINE_URL=$(cat pipeline | jq ".web_url")
echo
echo "Simnet pipeline ${PIPELINE_URL} was successfully triggered."
echo "Now we're polling it to obtain the distinguished status."
# This part polls for the triggered pipeline status; the native
# `trigger` job does not return this status via the API.
# This is a workaround for a Gitlab bug, waits here until
# https://gitlab.com/gitlab-org/gitlab/-/issues/326137 gets fixed.
# The timeout is 360 curls with 8 sec interval, roughly an hour.
@@ -30,17 +34,19 @@ function get_status() {
jq --raw-output ".status";
}
echo "Waiting on ${PIPELINE_ID} status..."
for i in $(seq 1 360); do
STATUS=$(get_status);
echo "Triggered pipeline status is ${STATUS}";
if [[ ${STATUS} =~ ^(pending|running|created)$ ]]; then
echo "Busy...";
echo;
elif [[ ${STATUS} =~ ^(failed|canceled|skipped|manual)$ ]]; then
exit 1;
echo "Something's broken in: ${PIPELINE_URL}"; exit 1;
elif [[ ${STATUS} =~ ^(success)$ ]]; then
exit 0;
echo "Look how green it is: ${PIPELINE_URL}"; exit 0;
else
exit 1;
echo "Something else has happened in ${PIPELINE_URL}"; exit 1;
fi
sleep 8;
done