# mirror of https://github.com/pezkuwichain/pezkuwi-subxt.git
# synced 2026-04-26 00:37:57 +00:00 (409 lines, 13 KiB, YAML)
# .gitlab-ci.yml
#
# substrate
#
# pipelines can be triggered manually in the web
#
# Currently the file is divided into subfiles. Each stage has a different file which
# can be found here: scripts/ci/gitlab/pipeline/<stage_name>.yml
#
# Instead of YAML anchors "extends" is used.
# Useful links:
#   https://docs.gitlab.com/ee/ci/yaml/index.html#extends
#   https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#reference-tags
#
# SAMPLE JOB TEMPLATE - This is not a complete example but is enough to build a
# simple CI job. For full documentation, visit https://docs.gitlab.com/ee/ci/yaml/
#
# my-example-job:
#   stage: test # One of the stages listed below this job (required)
#   image: paritytech/tools:latest # Any docker image (required)
#   allow_failure: true # Allow the pipeline to continue if this job fails (default: false)
#   needs:
#     - job: test-linux # Any jobs that are required to run before this job (optional)
#   variables:
#     MY_ENVIRONMENT_VARIABLE: "some useful value" # Environment variables passed to the job (optional)
#   script:
#     - echo "List of shell commands to run in your job"
#     - echo "You can also just specify a script here, like so:"
#     - ./scripts/ci/gitlab/my_amazing_script.sh
# Pipeline stages, executed in this order.
stages:
  - check
  - test
  - build
  - publish
  - notify
  - zombienet
  - deploy

# Create pipelines only for tags and branches (no merge-request pipelines).
workflow:
  rules:
    - if: $CI_COMMIT_TAG
    - if: $CI_COMMIT_BRANCH

# Global variables inherited by every job.
variables:
  GIT_STRATEGY: fetch
  GIT_DEPTH: 100
  CARGO_INCREMENTAL: 0
  DOCKER_OS: "debian:bullseye"
  ARCH: "x86_64"
  CI_IMAGE: "paritytech/ci-linux:production"
  BUILDAH_IMAGE: "quay.io/buildah/stable:v1.29"
  BUILDAH_COMMAND: "buildah --storage-driver overlay2"
  RELENG_SCRIPTS_BRANCH: "master"
  # rusty-cachier (build-cache) configuration
  RUSTY_CACHIER_SINGLE_BRANCH: master
  RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true"
  RUSTY_CACHIER_MINIO_ALIAS: rustycachier_gcs
  RUSTY_CACHIER_MINIO_BUCKET: parity-build-rusty-cachier
  RUSTY_CACHIER_COMPRESSION_METHOD: zstd
  # cargo-nextest output configuration
  NEXTEST_FAILURE_OUTPUT: immediate-final
  NEXTEST_SUCCESS_OUTPUT: final
  ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.55"

# Defaults applied to every job: retry on infrastructure failures,
# no shared cache, and allow pipelines to be interrupted.
default:
  retry:
    max: 2
    when:
      - runner_system_failure
      - unknown_failure
      - api_failure
  cache: {}
  interruptible: true

# Artifact-collection templates; the short variant keeps artifacts
# only long enough for downstream jobs of the same pipeline.
.collect-artifacts:
  artifacts:
    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
    when: on_success
    expire_in: 7 days
    paths:
      - artifacts/

.collect-artifacts-short:
  artifacts:
    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
    when: on_success
    expire_in: 3 hours
    paths:
      - artifacts/

.prepare-env:
  before_script:
    # TODO: remove unset invocation when we'll be free from 'ENV RUSTC_WRAPPER=sccache' & sccache
    # itself in all images
    - unset RUSTC_WRAPPER
    # $WASM_BUILD_WORKSPACE_HINT enables wasm-builder to find the Cargo.lock from within generated
    # packages
    - export WASM_BUILD_WORKSPACE_HINT="$PWD"
    # ensure that RUSTFLAGS are set correctly
    - echo $RUSTFLAGS

# Allows individual jobs to be disabled via the CI_DISABLED_JOBS project setting.
.job-switcher:
  before_script:
    - if echo "$CI_DISABLED_JOBS" | grep -xF "$CI_JOB_NAME"; then echo "The job has been cancelled in CI settings"; exit 0; fi

# Base environment for jobs that run on the Kubernetes runners.
.kubernetes-env:
  image: "${CI_IMAGE}"
  before_script:
    - !reference [.timestamp, before_script]
    - !reference [.job-switcher, before_script]
    - !reference [.prepare-env, before_script]
  tags:
    - kubernetes-parity-build

# Prints the active Rust toolchain versions (stable and nightly) for debugging.
.rust-info-script:
  script:
    - rustup show
    - cargo --version
    - rustup +nightly show
    - cargo +nightly --version

# Records the failing job's identity so the cancel-pipeline jobs in
# the .post stage can report it; exported via the dotenv artifact below.
.pipeline-stopper-vars:
  script:
    - !reference [.job-switcher, before_script]
    - echo "Collecting env variables for the cancel-pipeline job"
    - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env
    - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env
    - echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env

.pipeline-stopper-artifacts:
  artifacts:
    reports:
      dotenv: pipeline-stopper.env

# Base environment for jobs that run on the Linux Docker VM runners,
# with rusty-cachier instrumentation wired into before/after scripts.
.docker-env:
  image: "${CI_IMAGE}"
  before_script:
    - !reference [.timestamp, before_script]
    - !reference [.job-switcher, before_script]
    - !reference [.prepare-env, before_script]
    - !reference [.rust-info-script, script]
    - !reference [.rusty-cachier, before_script]
    - !reference [.pipeline-stopper-vars, script]
  after_script:
    - !reference [.rusty-cachier, after_script]
  tags:
    - linux-docker-vm-c2

# rusty-cachier's hidden job. Parts of this job are used to instrument the pipeline's other real jobs with rusty-cachier
# Description of the commands is available here - https://gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client#description
.rusty-cachier:
  before_script:
    - curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash
    - rusty-cachier environment check --gracefully
    - $(rusty-cachier environment inject)
    - rusty-cachier project mtime
  after_script:
    # NOTE(review): "SUPRESS" looks misspelled, but it is the env-var name the
    # rusty-cachier client reads — confirm with the client docs before renaming.
    - env RUSTY_CACHIER_SUPRESS_OUTPUT=true rusty-cachier snapshot destroy

# Rule templates selecting which refs each class of job runs on.
.test-refs:
  rules:
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME == "master"
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1

# handle the specific case where benches could store incorrect bench data because of the downstream staging runs
# exclude cargo-check-benches from such runs
.test-refs-check-benches:
  rules:
    - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "pipeline" && $CI_IMAGE =~ /staging$/
      when: never
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME == "master"
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1

.test-refs-no-trigger:
  rules:
    - if: $CI_PIPELINE_SOURCE == "pipeline"
      when: never
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME == "master"
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
    - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/

.test-refs-no-trigger-prs-only:
  rules:
    - if: $CI_PIPELINE_SOURCE == "pipeline"
      when: never
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs

.publish-refs:
  rules:
    - if: $CI_PIPELINE_SOURCE == "pipeline"
      when: never
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME == "master"
    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1

.build-refs:
  # publish-refs + PRs
  rules:
    - if: $CI_PIPELINE_SOURCE == "pipeline"
      when: never
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME == "master"
    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs

.zombienet-refs:
  extends: .build-refs

# Shared variables for all crate-publishing jobs.
.crates-publishing-variables:
  variables:
    CRATESIO_CRATES_OWNER: parity-crate-owner
    REPO: substrate
    REPO_OWNER: paritytech

# Only the scheduled automatic-crate-publishing pipeline on master qualifies.
.crates-publishing-pipeline:
  extends: .crates-publishing-variables
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_COMMIT_REF_NAME == "master" && $PIPELINE == "automatic-crate-publishing"

.crates-publishing-template:
  extends:
    - .docker-env
    - .crates-publishing-variables
  # collect artifacts even on failure so that we know how the crates were generated (they'll be
  # generated to the artifacts folder according to SPUB_TMP further down)
  artifacts:
    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
    when: always
    expire_in: 7 days
    paths:
      - artifacts/
  variables:
    SPUB_TMP: artifacts
    # disable timestamping for the crate publishing jobs, they leave stray child processes behind
    # which don't interact well with the timestamping script
    CI_DISABLE_TIMESTAMP: 1

#### stage: .pre

check-crates-publishing-pipeline:
  stage: .pre
  extends:
    - .kubernetes-env
    - .crates-publishing-pipeline
  script:
    - git clone
      --depth 1
      --branch "$RELENG_SCRIPTS_BRANCH"
      https://github.com/paritytech/releng-scripts.git
    - ONLY_CHECK_PIPELINE=true ./releng-scripts/publish-crates

# By default our pipelines are interruptible, but some special pipelines shouldn't be interrupted:
# * multi-project pipelines such as the ones triggered by the scripts repo
# * the scheduled automatic-crate-publishing pipeline
#
# In those cases, we add an uninterruptible .pre job; once that one has started,
# the entire pipeline becomes uninterruptible
uninterruptible-pipeline:
  extends: .kubernetes-env
  variables:
    CI_IMAGE: "paritytech/tools:latest"
  stage: .pre
  interruptible: false
  rules:
    - if: $CI_PIPELINE_SOURCE == "pipeline"
    - if: $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "automatic-crate-publishing"
  script: "true"

include:
  # check jobs
  - scripts/ci/gitlab/pipeline/check.yml
  # tests jobs
  - scripts/ci/gitlab/pipeline/test.yml
  # build jobs
  - scripts/ci/gitlab/pipeline/build.yml
  # publish jobs
  - scripts/ci/gitlab/pipeline/publish.yml
  # zombienet jobs
  - scripts/ci/gitlab/pipeline/zombienet.yml
  # The crate-publishing pipeline requires a customized `interruptible` configuration. Unfortunately
  # `interruptible` can't currently be dynamically set based on variables as per:
  # - https://gitlab.com/gitlab-org/gitlab/-/issues/38349
  # - https://gitlab.com/gitlab-org/gitlab/-/issues/194023
  # Thus we work around that limitation by using conditional includes.
  # For crate-publishing pipelines: run it with defaults + `interruptible: false`. The WHOLE
  # pipeline is made uninterruptible to ensure that test jobs also get a chance to run to
  # completion, because the publishing jobs depends on them AS INTENDED: crates should not be
  # published before their source code is checked.
  - project: parity/infrastructure/ci_cd/shared
    ref: v0.2
    file: /common/timestamp.yml

#### stage: notify

# This job notifies rusty-cachier about the latest commit with the cache.
# This info is later used for the cache distribution and an overlay creation.
# Note that we don't use any .rusty-cachier references as we assume that a pipeline has reached this stage with working rusty-cachier.
rusty-cachier-notify:
  stage: notify
  extends: .kubernetes-env
  variables:
    CI_IMAGE: paritytech/rusty-cachier-env:latest
    GIT_STRATEGY: none
  dependencies: []
  script:
    - curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash
    - rusty-cachier cache notify

#### stage: .post

# This job cancels the whole pipeline if any of provided jobs fail.
# In a DAG, every jobs chain is executed independently of others. The `fail_fast` principle suggests
# to fail the pipeline as soon as possible to shorten the feedback loop.
.cancel-pipeline-template:
  stage: .post
  rules:
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
      when: on_failure
  variables:
    PROJECT_ID: "${CI_PROJECT_ID}"
    PROJECT_NAME: "${CI_PROJECT_NAME}"
    PIPELINE_ID: "${CI_PIPELINE_ID}"
    FAILED_JOB_URL: "${FAILED_JOB_URL}"
    FAILED_JOB_NAME: "${FAILED_JOB_NAME}"
    PR_NUM: "${PR_NUM}"
  trigger:
    project: "parity/infrastructure/ci_cd/pipeline-stopper"

# Clears any previously-posted cancellation message for this PR.
remove-cancel-pipeline-message:
  stage: .post
  rules:
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
  variables:
    PROJECT_ID: "${CI_PROJECT_ID}"
    PROJECT_NAME: "${CI_PROJECT_NAME}"
    PIPELINE_ID: "${CI_PIPELINE_ID}"
    FAILED_JOB_URL: "https://gitlab.com"
    FAILED_JOB_NAME: "nope"
    PR_NUM: "${CI_COMMIT_REF_NAME}"
  trigger:
    project: "parity/infrastructure/ci_cd/pipeline-stopper"
    branch: "as-improve"

# need to copy jobs this way because otherwise gitlab will wait
# for all 3 jobs to finish instead of cancelling if one fails
cancel-pipeline-test-linux-stable1:
  extends: .cancel-pipeline-template
  needs:
    - job: "test-linux-stable 1/3"

cancel-pipeline-test-linux-stable2:
  extends: .cancel-pipeline-template
  needs:
    - job: "test-linux-stable 2/3"

cancel-pipeline-test-linux-stable3:
  extends: .cancel-pipeline-template
  needs:
    - job: "test-linux-stable 3/3"

cancel-pipeline-cargo-check-benches1:
  extends: .cancel-pipeline-template
  needs:
    - job: "cargo-check-benches 1/2"

cancel-pipeline-cargo-check-benches2:
  extends: .cancel-pipeline-template
  needs:
    - job: "cargo-check-benches 2/2"

cancel-pipeline-test-linux-stable-int:
  extends: .cancel-pipeline-template
  needs:
    - job: test-linux-stable-int

cancel-pipeline-cargo-check-each-crate-1:
  extends: .cancel-pipeline-template
  needs:
    - job: "cargo-check-each-crate 1/2"

cancel-pipeline-cargo-check-each-crate-2:
  extends: .cancel-pipeline-template
  needs:
    - job: "cargo-check-each-crate 2/2"

cancel-pipeline-cargo-check-each-crate-macos:
  extends: .cancel-pipeline-template
  needs:
    - job: cargo-check-each-crate-macos

cancel-pipeline-check-tracing:
  extends: .cancel-pipeline-template
  needs:
    - job: check-tracing