mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-27 02:17:58 +00:00
[CI]Chaostest suite initiation (#5793)
* Initiate chaostest cli test suite: singlenodeheight on one dev node
Added chaostest stages in CI
Added new docker/k8s resources and environments to CI
Added new chaos-only tag to gitlab-ci.yml
* Update .maintain/chaostest/src/commands/singlenodeheight/index.js
Co-authored-by: Max Inden <mail@max-inden.de>
* change nameSpace to namespace(one word)
* update chaos ci job to match template
* rename build-pr ci stage to docker [chaos:basic]
* test gitlab-ci [chaos:basic]
* Update .gitlab-ci.yml
* add new build-chaos-only condition
* add *default-vars to singlenodeheight [chaos:basic]
* change build-only to build-rules on substrate jobs [chaos:basic]
* test and change when:on_success to when:always [chaos:basic]
* resolve conflicts and test [chaos:basic]
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Denis Pisarev <denis.pisarev@parity.io>
This commit is contained in:
+121
-11
@@ -26,6 +26,8 @@ stages:
|
||||
- test
|
||||
- build
|
||||
- post-build-test
|
||||
- docker
|
||||
- chaos
|
||||
- publish
|
||||
- deploy
|
||||
- flaming-fir
|
||||
@@ -64,12 +66,6 @@ default:
|
||||
- rustup show
|
||||
- cargo --version
|
||||
- sccache -s
|
||||
only:
|
||||
- master
|
||||
- /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
|
||||
- schedules
|
||||
- web
|
||||
- /^[0-9]+$/ # PRs
|
||||
retry:
|
||||
max: 2
|
||||
when:
|
||||
@@ -80,6 +76,14 @@ default:
|
||||
tags:
|
||||
- linux-docker
|
||||
|
||||
.docker-env-only: &docker-env-only
|
||||
only:
|
||||
- master
|
||||
- /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
|
||||
- schedules
|
||||
- web
|
||||
- /^[0-9]+$/ # PRs
|
||||
|
||||
.build-only: &build-only
|
||||
only:
|
||||
- master
|
||||
@@ -87,6 +91,27 @@ default:
|
||||
- /^pre-v[0-9]+\.[0-9]+-[0-9a-f]+$/
|
||||
- web
|
||||
|
||||
.build-rules: &build-rules
|
||||
rules:
|
||||
- if: '$DEPLOY_TAG'
|
||||
when: never
|
||||
- if: $CI_COMMIT_REF_NAME=="master"
|
||||
when: always
|
||||
- if: $CI_PIPELINE_SOURCE=="web"
|
||||
when: always
|
||||
- if: $CI_COMMIT_REF_NAME=~ /^v[0-9]+\.[0-9]+.*$/
|
||||
when: always
|
||||
- if: $CI_COMMIT_REF_NAME=~ /^pre-v[0-9]+\.[0-9]+-[0-9a-f]+$/
|
||||
when: always
|
||||
- if: '$CI_COMMIT_MESSAGE =~ /\[chaos:(basic|medium|large)\]/ && $CI_COMMIT_REF_NAME=~ /^[0-9]+$/' # i.e add [chaos:basic] in commit message to trigger
|
||||
when: always
|
||||
- when: never
|
||||
|
||||
.chaos-only: &chaos-only
|
||||
only:
|
||||
variables:
|
||||
- '$CI_COMMIT_MESSAGE =~ /\[chaos:(basic|medium|large)\]/ && $CI_COMMIT_REF_NAME=~ /^[0-9]+$/' # i.e add [chaos:basic] in commit message to trigger
|
||||
|
||||
#### stage: .pre
|
||||
|
||||
skip-if-draft:
|
||||
@@ -96,6 +121,10 @@ skip-if-draft:
|
||||
only:
|
||||
- /^[0-9]+$/ # Pull requests
|
||||
script:
|
||||
- echo "Commit message is ${CI_COMMIT_MESSAGE}"
|
||||
- echo "Ref is ${CI_COMMIT_REF_NAME}"
|
||||
- echo "pipeline source is ${CI_PIPELINE_SOURCE}"
|
||||
- echo "deploy tag is ${DEPLOY_TAG}"
|
||||
- ./.maintain/gitlab/skip_if_draft.sh
|
||||
|
||||
#### stage: check
|
||||
@@ -149,6 +178,7 @@ test-dependency-rules:
|
||||
cargo-audit:
|
||||
stage: test
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
except:
|
||||
- /^[0-9]+$/
|
||||
script:
|
||||
@@ -158,6 +188,7 @@ cargo-audit:
|
||||
cargo-deny:
|
||||
stage: test
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
only:
|
||||
- schedules
|
||||
- tags
|
||||
@@ -177,6 +208,7 @@ cargo-deny:
|
||||
cargo-check-benches:
|
||||
stage: test
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
script:
|
||||
- BUILD_DUMMY_WASM_BINARY=1 time cargo +nightly check --benches --all
|
||||
- cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small
|
||||
@@ -186,6 +218,7 @@ cargo-check-benches:
|
||||
cargo-check-subkey:
|
||||
stage: test
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
except:
|
||||
- /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
|
||||
script:
|
||||
@@ -217,6 +250,7 @@ test-deterministic-wasm:
|
||||
test-linux-stable: &test-linux
|
||||
stage: test
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
variables:
|
||||
<<: *default-vars
|
||||
# Enable debug assertions since we are running optimized builds for testing
|
||||
@@ -235,6 +269,7 @@ test-linux-stable: &test-linux
|
||||
unleash-check:
|
||||
stage: test
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
only:
|
||||
- master
|
||||
- tags
|
||||
@@ -246,6 +281,7 @@ test-frame-examples-compile-to-wasm:
|
||||
# into one job
|
||||
stage: test
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
variables:
|
||||
<<: *default-vars
|
||||
# Enable debug assertions since we are running optimized builds for testing
|
||||
@@ -289,6 +325,7 @@ test-linux-stable-int:
|
||||
check-web-wasm:
|
||||
stage: test
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
except:
|
||||
- /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
|
||||
script:
|
||||
@@ -308,6 +345,7 @@ check-web-wasm:
|
||||
test-full-crypto-feature:
|
||||
stage: test
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
variables:
|
||||
<<: *default-vars
|
||||
# Enable debug assertions since we are running optimized builds for testing
|
||||
@@ -328,6 +366,7 @@ cargo-check-macos:
|
||||
stage: test
|
||||
# shell runner on mac ignores the image set in *docker-env
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
script:
|
||||
- BUILD_DUMMY_WASM_BINARY=1 time cargo check --release
|
||||
- sccache -s
|
||||
@@ -356,6 +395,7 @@ check-polkadot-companion-status:
|
||||
check-polkadot-companion-build:
|
||||
stage: build
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
needs:
|
||||
- job: test-linux-stable-int
|
||||
artifacts: false
|
||||
@@ -368,6 +408,7 @@ check-polkadot-companion-build:
|
||||
test-browser-node:
|
||||
stage: build
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
needs:
|
||||
- job: check-web-wasm
|
||||
artifacts: false
|
||||
@@ -383,15 +424,12 @@ build-linux-substrate: &build-binary
|
||||
stage: build
|
||||
<<: *collect-artifacts
|
||||
<<: *docker-env
|
||||
<<: *build-only
|
||||
<<: *build-rules
|
||||
needs:
|
||||
- job: test-linux-stable
|
||||
artifacts: false
|
||||
before_script:
|
||||
- mkdir -p ./artifacts/substrate/
|
||||
except:
|
||||
variables:
|
||||
- $DEPLOY_TAG
|
||||
script:
|
||||
- WASM_BUILD_NO_COLOR=1 time cargo build --release --verbose
|
||||
- mv ./target/release/substrate ./artifacts/substrate/.
|
||||
@@ -411,7 +449,10 @@ build-linux-substrate: &build-binary
|
||||
|
||||
|
||||
build-linux-subkey: &build-subkey
|
||||
<<: *build-binary
|
||||
stage: build
|
||||
<<: *collect-artifacts
|
||||
<<: *docker-env
|
||||
<<: *build-only
|
||||
needs:
|
||||
- job: cargo-check-subkey
|
||||
artifacts: false
|
||||
@@ -441,6 +482,7 @@ build-macos-subkey:
|
||||
build-rust-doc-release:
|
||||
stage: build
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
allow_failure: true
|
||||
artifacts:
|
||||
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc"
|
||||
@@ -474,6 +516,73 @@ trigger-contracts-ci:
|
||||
- master
|
||||
- schedules
|
||||
|
||||
#### stage: docker
|
||||
docker-build-chaos: &docker-build-chaos
|
||||
<<: *chaos-only
|
||||
stage: docker
|
||||
needs:
|
||||
- job: build-linux-substrate
|
||||
image: docker:stable
|
||||
tags:
|
||||
- kubernetes-parity-build
|
||||
variables:
|
||||
<<: *default-vars
|
||||
DOCKER_HOST: tcp://localhost:2375
|
||||
DOCKER_DRIVER: overlay2
|
||||
PRODUCT: substrate
|
||||
DOCKERFILE: $PRODUCT.Dockerfile
|
||||
CONTAINER_IMAGE: paritypr/$PRODUCT
|
||||
environment:
|
||||
name: parity-chaosnet
|
||||
services:
|
||||
- docker:dind
|
||||
before_script:
|
||||
- test "$DOCKER_CHAOS_USER" -a "$DOCKER_CHAOS_TOKEN"
|
||||
|| ( echo "no docker credentials provided"; exit 1 )
|
||||
- docker login -u "$DOCKER_CHAOS_USER" -p "$DOCKER_CHAOS_TOKEN"
|
||||
- docker info
|
||||
script:
|
||||
- cd ./artifacts/$PRODUCT/
|
||||
- VERSION="ci-${CI_COMMIT_SHORT_SHA}"
|
||||
- echo "${PRODUCT} version = ${VERSION}"
|
||||
- test -z "${VERSION}" && exit 1
|
||||
- docker build
|
||||
--build-arg VCS_REF="${CI_COMMIT_SHA}"
|
||||
--build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')"
|
||||
--tag $CONTAINER_IMAGE:$VERSION
|
||||
--file $DOCKERFILE .
|
||||
- docker push $CONTAINER_IMAGE:$VERSION
|
||||
after_script:
|
||||
- docker logout
|
||||
|
||||
#### stage: chaos
|
||||
chaos-test-singlenodeheight:
|
||||
<<: *chaos-only
|
||||
stage: chaos
|
||||
image: parity/chaostools:latest
|
||||
needs:
|
||||
- job: docker-build-chaos
|
||||
tags:
|
||||
- parity-chaos
|
||||
variables:
|
||||
<<: *default-vars
|
||||
PRODUCT: substrate
|
||||
DOCKERFILE: $PRODUCT.Dockerfile
|
||||
CONTAINER_IMAGE: paritypr/$PRODUCT
|
||||
KEEP_NAMESPACE: 0
|
||||
NAMESPACE: "substrate-ci-${CI_COMMIT_SHORT_SHA}-${CI_PIPELINE_ID}"
|
||||
VERSION: "ci-${CI_COMMIT_SHORT_SHA}"
|
||||
interruptible: true
|
||||
environment:
|
||||
name: parity-chaosnet
|
||||
script:
|
||||
- cd ./.maintain/chaostest
|
||||
- npm link
|
||||
- chaostest spawn dev -i $CONTAINER_IMAGE:$VERSION
|
||||
- chaostest singlenodeheight -h 30
|
||||
after_script:
|
||||
- chaostest clean
|
||||
|
||||
#### stage: publish
|
||||
|
||||
.build-push-docker-image: &build-push-docker-image
|
||||
@@ -596,6 +705,7 @@ publish-draft-release:
|
||||
publish-to-crates-io:
|
||||
stage: publish
|
||||
<<: *docker-env
|
||||
<<: *docker-env-only
|
||||
only:
|
||||
- /^ci-release-.*$/
|
||||
- /^v[0-9]+\.[0-9]+\.[0-9]+.*$/
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
node_modules
|
||||
@@ -0,0 +1,19 @@
|
||||
{
|
||||
"env": {
|
||||
"node": true,
|
||||
"commonjs": true,
|
||||
"es6": true
|
||||
},
|
||||
"extends": [
|
||||
"standard"
|
||||
],
|
||||
"globals": {
|
||||
"Atomics": "readonly",
|
||||
"SharedArrayBuffer": "readonly"
|
||||
},
|
||||
"parserOptions": {
|
||||
"ecmaVersion": 2018
|
||||
},
|
||||
"rules": {
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
*-debug.log
|
||||
*-error.log
|
||||
/.nyc_output
|
||||
/dist
|
||||
/tmp
|
||||
/log
|
||||
.DS_Store
|
||||
.editorconfig
|
||||
yarn.lock
|
||||
node_modules
|
||||
/src/config/config.json
|
||||
@@ -0,0 +1,89 @@
|
||||
chaostest
|
||||
=========
|
||||
|
||||
A cli for chaos testing on substrate
|
||||
|
||||
[](https://oclif.io)
|
||||
[](https://npmjs.org/package/chaostest)
|
||||
[](https://npmjs.org/package/chaostest)
|
||||
|
||||
<!-- toc -->
|
||||
* [Usage](#usage)
|
||||
* [Commands](#commands)
|
||||
<!-- tocstop -->
|
||||
# Usage
|
||||
<!-- usage -->
|
||||
```sh-session
|
||||
$ npm install -g chaostest // yarn add global chaostest
|
||||
$ chaostest COMMAND
|
||||
running command...
|
||||
$ chaostest (-v|--version|version)
|
||||
chaostest/0.0.0 darwin-x64 node-v8.16.0
|
||||
$ chaostest --help [COMMAND]
|
||||
USAGE
|
||||
$ chaostest COMMAND
|
||||
...
|
||||
```
|
||||
<!-- usagestop -->
|
||||
# Commands
|
||||
<!-- commands -->
|
||||
* [`chaostest spawn`](#chaostest-spawn)
|
||||
* [`chaostest singlenodeheight`](#chaostest-singlenodeheight)
|
||||
* [`chaostest clean`](#chaostest-clean)
|
||||
|
||||
## `chaostest spawn`
|
||||
|
||||
Spawn a testnet based on your local k8s configuration. Could be either a dev node, a two node alicebob chain or a customized chain with various validators/fullnodes.
|
||||
|
||||
```
|
||||
USAGE
|
||||
$ chaostest spawn [ARGUMENTS] [FLAGS]
|
||||
|
||||
Arguments
|
||||
dev, a single fullnode in --dev mode
|
||||
alicebob, a two-node private chain with Alice as the bootnode and Bob as a validator
|
||||
[chainName], a customized chain deployed with -v numbers of validators and -n numbers of fullnodes
|
||||
|
||||
Flags
|
||||
--image, -i, the image tag of the certain substrate version you want to deploy
|
||||
--port, -p, the port to expose when image is deployed in a pod
|
||||
--namespace, the desired namespace to deploy on
|
||||
--validator, -v, the number of substrate validators to deploy
|
||||
--node, -n, the number of full nodes to deploy; defaults to 1 if not set
|
||||
|
||||
DESCRIPTION
|
||||
...
|
||||
Extra documentation goes here
|
||||
```
|
||||
|
||||
_See code: [src/commands/spawn/index.js](https://github.com/paritytech/substrate/blob/harry/chaostest-init/.maintain/chaostest/src/commands/spawn/index.js)_
|
||||
|
||||
## `chaostest singlenodeheight`
|
||||
|
||||
Test against a fullnode on --dev mode to check if it can successfully produce blocks to a certain height.
|
||||
|
||||
```
|
||||
USAGE
|
||||
$ chaostest singlenodeheight [FLAGS]
|
||||
|
||||
FLAGS
|
||||
-h , the desired height of blocks to check if reachable, this only works with integers smaller than 2^6
|
||||
-t, the wait time out before it halts the polling
|
||||
```
|
||||
|
||||
_See code: [src/commands/singlenodeheight/index.js](https://github.com/paritytech/substrate/blob/harry/chaostest-init/.maintain/chaostest/src/commands/singlenodeheight/index.js)_
|
||||
|
||||
## `chaostest clean`
|
||||
|
||||
Clean up the k8s deployment by namespace.
|
||||
|
||||
```
|
||||
USAGE
|
||||
$ chaostest clean [FLAGS]
|
||||
|
||||
FLAGS
|
||||
-n , the desired namespace to delete on your k8s cluster
|
||||
```
|
||||
|
||||
_See code: [src/commands/clean/index.js](https://github.com/paritytech/substrate/blob/harry/chaostest-init/.maintain/chaostest/src/commands/clean/index.js)_
|
||||
<!-- commandsstop -->
|
||||
Executable
+5
@@ -0,0 +1,5 @@
|
||||
#!/usr/bin/env node

// oclif CLI entry point: dispatch the invoked subcommand, flush stdout,
// then route any error through oclif's handler (prints it and sets the
// process exit code).
require('@oclif/command').run()
  .then(require('@oclif/command/flush'))
  .catch(require('@oclif/errors/handle'))
|
||||
@@ -0,0 +1,3 @@
|
||||
@echo off
rem Windows launcher for the chaostest CLI: delegates to the sibling "run" script via node.

node "%~dp0\run" %*
|
||||
+5950
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,61 @@
|
||||
{
|
||||
"name": "chaostest",
|
||||
"description": "A cli for chaos testing on substrate",
|
||||
"version": "0.0.0",
|
||||
"author": "HarryHong",
|
||||
"bin": {
|
||||
"chaostest": "./bin/run"
|
||||
},
|
||||
"bugs": "https://github.com/paritytech/substrate/issues",
|
||||
"dependencies": {
|
||||
"@kubernetes/client-node": "^0.11.1",
|
||||
"@oclif/command": "^1",
|
||||
"@oclif/config": "^1",
|
||||
"@oclif/plugin-help": "^2",
|
||||
"@polkadot/api": "^0.95.0-beta.14",
|
||||
"@polkadot/keyring": "^1.6.0-beta.9",
|
||||
"winston": "^3.2.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@oclif/dev-cli": "^1",
|
||||
"@oclif/test": "^1",
|
||||
"chai": "^4",
|
||||
"eslint": "^7.1.0",
|
||||
"eslint-config-oclif": "^3.1",
|
||||
"eslint-config-standard": "^14.1.1",
|
||||
"eslint-plugin-import": "^2.20.2",
|
||||
"eslint-plugin-node": "^11.1.0",
|
||||
"eslint-plugin-promise": "^4.2.1",
|
||||
"eslint-plugin-standard": "^4.0.1",
|
||||
"globby": "^10",
|
||||
"nyc": "^14"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8.0.0"
|
||||
},
|
||||
"files": [
|
||||
"/bin",
|
||||
"/npm-shrinkwrap.json",
|
||||
"/oclif.manifest.json",
|
||||
"/src"
|
||||
],
|
||||
"homepage": "https://github.com/paritytech/substrate/tree/master/.maintain/chaostest",
|
||||
"keywords": [
|
||||
"oclif"
|
||||
],
|
||||
"main": "src/index.js",
|
||||
"oclif": {
|
||||
"commands": "./src/commands",
|
||||
"bin": "chaostest",
|
||||
"plugins": [
|
||||
"@oclif/plugin-help"
|
||||
]
|
||||
},
|
||||
"repository": "https://github.com/paritytech/substrate/tree/master/.maintain/chaostest",
|
||||
"scripts": {
|
||||
"postpack": "rm -f oclif.manifest.json",
|
||||
"posttest": "eslint .",
|
||||
"prepack": "oclif-dev manifest && oclif-dev readme",
|
||||
"version": "oclif-dev readme && git add README.md"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,31 @@
|
||||
const { Command, flags } = require('@oclif/command')
|
||||
const CONFIG = require('../../config')()
|
||||
const logger = require('../../utils/logger')
|
||||
const Hypervisor = require('../../hypervisor')
|
||||
|
||||
// Tear down a chaostest deployment by deleting its k8s namespace.
class CleanCommand extends Command {
  async run () {
    const { flags } = this.parse(CleanCommand)
    // Delete corresponding namespace, default to CONFIG.namespace
    const targetNamespace = flags.namespace || CONFIG.namespace
    const hypervisor = new Hypervisor(CONFIG)
    try {
      if (!targetNamespace) {
        logger.debug('Nothing to clean up')
        return
      }
      await hypervisor.cleanup(targetNamespace)
    } catch (error) {
      logger.error(error)
      process.exit(1)
    }
  }
}

CleanCommand.description = 'Clean up resources based on namespace'

CleanCommand.flags = {
  namespace: flags.string({ char: 'n', description: 'desired namespace to clean up', env: 'NAMESPACE' })
}

module.exports = CleanCommand
|
||||
@@ -0,0 +1,63 @@
|
||||
const { Command, flags } = require('@oclif/command')
|
||||
const CONFIG = require('../../config')()
|
||||
const { succeedExit, errorExit } = require('../../utils/exit')
|
||||
const Hypervisor = require('../../hypervisor')
|
||||
const logger = require('../../utils/logger')
|
||||
|
||||
class SingleNodeHeightCommand extends Command {
  // Poll a substrate node over JSON-RPC until its best block exceeds the
  // requested height, or the wait timeout elapses. Targets either an explicit
  // url+port, or a pod+namespace reached through a local port-forward.
  async run () {
    const { flags } = this.parse(SingleNodeHeightCommand)
    let port = flags.port
    let url = flags.url
    // BUG FIX: the flag is named `timeout`, but the original read `flags.wait`,
    // which is always undefined — so -t was silently ignored.
    const wait = flags.timeout ? parseInt(flags.timeout, 10) : 600 * 1000
    // Parse the (string) height flag so the comparison below is numeric.
    const height = flags.height ? parseInt(flags.height, 10) : 10
    const namespace = flags.namespace || CONFIG.namespace
    // BUG FIX: `a || b ? c : d` parses as `(a || b) ? c : d`, so the original
    // dereferenced CONFIG.nodes[0] even when only flags.pod was provided,
    // crashing when no node is recorded in config.
    const pod = flags.pod ||
      ((CONFIG.nodes && CONFIG.nodes[0]) ? CONFIG.nodes[0].podName : undefined)
    const now = Date.now()

    const hypervisor = new Hypervisor(CONFIG)
    if (!!url && !!port) {
      JsonRpcCallTestHeight(url, port)
    } else if (!!pod && !!namespace) {
      url = 'http://127.0.0.1'
      port = 9933
      await hypervisor.startForwardServer(namespace, pod, port)
      JsonRpcCallTestHeight(url, port)
    } else {
      errorExit('Not enough parameters provided. Either specify url and port or pod and namespace.')
    }

    // Re-polls every 2s until `height` is reached or `wait` ms have elapsed.
    async function JsonRpcCallTestHeight (url, port) {
      logger.debug('Polling chain height...')
      if (Date.now() < now + wait) {
        try {
          const curHeight = await hypervisor.getChainBlockHeight(url, port)
          logger.debug('Current Block Height: ' + curHeight)
          if (curHeight > height) {
            logger.info(`Single dev node Blockheight reached ${height}`)
            succeedExit()
          } else {
            setTimeout(() => JsonRpcCallTestHeight(url, port), 2000)
          }
        } catch (error) {
          errorExit('Error requesting chain block height', error)
        }
      } else {
        errorExit('Timed out')
      }
    }
  }
}

SingleNodeHeightCommand.description = 'Test if targeted node is producing blocks > certain height'

SingleNodeHeightCommand.flags = {
  port: flags.integer({ char: 'p', description: 'port to deploy' }),
  url: flags.string({ char: 'u', description: 'connect url' }),
  timeout: flags.string({ char: 't', description: 'wait time in miliseconds to halt' }),
  height: flags.string({ char: 'h', description: 'desired height to test' }),
  pod: flags.string({ description: 'desired pod to test' }),
  namespace: flags.string({ description: 'desired namespace to test' })
}

module.exports = SingleNodeHeightCommand
|
||||
@@ -0,0 +1,52 @@
|
||||
const { Command, flags } = require('@oclif/command')
|
||||
const logger = require('../../utils/logger')
|
||||
const Hypervisor = require('../../hypervisor')
|
||||
const CONFIG = require('../../config')()
|
||||
|
||||
class SpawnCommand extends Command {
  // Spawn a testnet in k8s: 'dev' for a single --dev fullnode, 'alicebob' for
  // the two-node local chain; any other chainName is reserved (TODO below).
  async run () {
    const { flags } = this.parse(SpawnCommand)
    const { args } = this.parse(SpawnCommand)
    const imageTag = flags.image || 'parity/substrate:latest'
    const port = flags.port || 9933
    const namespace = flags.namespace || 'substrate-ci'
    // Accepted but not yet consumed — kept for the planned customized-chain
    // deployment (see TODO below).
    const validator = flags.validator || 0
    const node = flags.node || 1

    const hypervisor = new Hypervisor(CONFIG)
    try {
      // Check/Create namespace
      await hypervisor.readOrCreateNamespace(namespace)
      const chainName = args.chainName
      if (chainName) {
        if (chainName === 'dev') {
          logger.debug('Starting a fullnode in dev mode...')
          await hypervisor.createDevNode(imageTag, port)
        } else if (chainName === 'alicebob') {
          await hypervisor.createAliceBobNodes(imageTag, port)
        } else {
          // TODO: customized chain with chainName
        }
      }
    } catch (error) {
      logger.error(error)
      process.exit(1)
    }
  }
}

SpawnCommand.description = 'Spawn a local testnet with options'

SpawnCommand.flags = {
  image: flags.string({ char: 'i', description: 'image to deploy' }),
  port: flags.integer({ char: 'p', description: 'port to deploy on' }),
  namespace: flags.string({ description: 'desired namespace to deploy to', env: 'NAMESPACE' }),
  validator: flags.string({ char: 'v', description: 'number of validators' }),
  node: flags.string({ char: 'n', description: 'number of full nodes, if not set but exists, default to 1' }),
  // BUG FIX: the two descriptions below were copy-pasted from `node`;
  // both flags are currently unused by run().
  key: flags.string({ char: 'k', description: 'node key (not yet implemented)' }),
  chainspec: flags.string({ char: 'c', description: 'chainspec to use (not yet implemented)' })
}

SpawnCommand.args = [{ name: 'chainName' }]

module.exports = SpawnCommand
|
||||
@@ -0,0 +1,34 @@
|
||||
chaostest CONFIG
|
||||
=========
|
||||
|
||||
Since deployment can behave differently, we want to keep a state between phases including different test subjects.
|
||||
|
||||
# Content
|
||||
The state could include information such as:
|
||||
```
|
||||
{
|
||||
namespace,
|
||||
image,
|
||||
bootnode: {
|
||||
podname,
|
||||
ip,
|
||||
port,
|
||||
peerId,
|
||||
privateKey,
|
||||
publicKey
|
||||
},
|
||||
nodes: [{
|
||||
podname,
|
||||
ip,
|
||||
port,
|
||||
nodeType: 'validator' | 'bootnode' | ,
|
||||
privateKey (validator only),
|
||||
publicKey (validator only)
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
# TODO
|
||||
k8s configuration
|
||||
chainspec
|
||||
chaos-agent
|
||||
@@ -0,0 +1,70 @@
|
||||
const fs = require('fs')
|
||||
const path = require('path')
|
||||
const configPath = path.join(__dirname, './config.json')
|
||||
const logger = require('../utils/logger')
|
||||
|
||||
// Persistent CLI state (namespace, node list, bootnode info), mirrored to
// src/config/config.json between command invocations.
class Config {
  // Kick off a load of the persisted state as soon as the object is built.
  constructor () {
    this.load()
  }

  // Read config.json into this instance. A missing file resets to an empty
  // config; a corrupted file is logged and reset as well.
  // NOTE(review): fs.readFile is callback-based, so callers may observe the
  // instance before the load completes — preserved from the original design.
  async load () {
    fs.readFile(configPath, (err, data) => {
      if (err) {
        if (err.code === 'ENOENT') {
          this.reset()
        } else {
          throw err
        }
      } else {
        try {
          Object.assign(this, JSON.parse(data))
        } catch (error) {
          logger.error('config file is corrupted, resetting...')
          this.reset()
        }
      }
    })
  }

  getConfig () {
    return this
  }

  // Persist the current state back to config.json.
  async update () {
    const data = JSON.stringify(this.getConfig())
    fs.writeFile(configPath, data, (err) => {
      if (err) throw err
      logger.debug('Configuration updated')
    })
  }

  async setNamespace (namespace) {
    this.namespace = namespace
    this.update()
  }

  // Track a newly created node; bootnodes are additionally mirrored to
  // this.bootnode so peers can look up the boot address.
  async addNode (node) {
    // BUG FIX: the original condition `!this.nodes || Array.isArray(this.nodes)`
    // is true for every valid array, so the node list was wiped on each call
    // and only ever held the last node added.
    if (!Array.isArray(this.nodes)) {
      this.nodes = []
    }
    if (node.nodeType === 'bootnode') {
      this.bootnode = node
    }
    this.nodes.push(node)
    this.update()
  }

  // Overwrite config.json with an empty object, then reload it.
  async reset () {
    const data = JSON.stringify({})
    fs.writeFile(configPath, data, (err) => {
      if (err) throw err
      this.load()
    })
  }
}
|
||||
|
||||
// Factory: each call builds a fresh Config that starts loading immediately.
module.exports = () => new Config()
|
||||
@@ -0,0 +1,16 @@
|
||||
const chainApi = require('../modules/chainApi')
|
||||
|
||||
exports.getApi = async function (endpoint) {
|
||||
if (this._apiInstance && this._apiInstance.endpoint === endpoint) {
|
||||
return this._apiInstance.instance
|
||||
} else {
|
||||
const instance = await chainApi.getApi(endpoint)
|
||||
this._apiInstance = { endpoint, instance }
|
||||
return instance
|
||||
}
|
||||
}
|
||||
|
||||
exports.getChainBlockHeight = async function (url, port) {
|
||||
const api = await this.getApi(url + ':' + port)
|
||||
return chainApi.getChainBlockHeight(api)
|
||||
}
|
||||
@@ -0,0 +1,4 @@
|
||||
const api = require('./api')
|
||||
module.exports = function (Hypervisor) {
|
||||
Object.assign(Hypervisor.prototype, api)
|
||||
}
|
||||
@@ -0,0 +1,123 @@
|
||||
const k8s = require('../modules/k8s')
|
||||
const { pollUntil } = require('../../utils/wait')
|
||||
const { getBootNodeUrl } = require('../../utils')
|
||||
const logger = require('../../utils/logger')
|
||||
|
||||
exports.readOrCreateNamespace = async function (namespace) {
|
||||
try {
|
||||
logger.debug('Reading namespace')
|
||||
await k8s.readNamespace(namespace) // if namespace is available, do not create here
|
||||
} catch (error) {
|
||||
if (error.response.statusCode !== 404) {
|
||||
logger.error(error)
|
||||
throw error
|
||||
}
|
||||
logger.debug('Namespace not present, creating...')
|
||||
await k8s.createNamespace(namespace)
|
||||
}
|
||||
this.config.setNamespace(namespace)
|
||||
}
|
||||
exports.createAlice = async function (image, port) {
|
||||
const substrateArgs = [
|
||||
'--chain=local',
|
||||
'--node-key',
|
||||
'0000000000000000000000000000000000000000000000000000000000000001',
|
||||
'--validator',
|
||||
'--no-telemetry',
|
||||
'--rpc-cors',
|
||||
'all',
|
||||
'--alice']
|
||||
const nodeSpec = {
|
||||
nodeId: 'alice',
|
||||
image,
|
||||
port,
|
||||
args: substrateArgs
|
||||
}
|
||||
nodeSpec.extraInfo = {
|
||||
nodeType: 'bootnode',
|
||||
privateKey: '',
|
||||
publicKey: '',
|
||||
peerId: '12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp'
|
||||
}
|
||||
await this.createNode(nodeSpec)
|
||||
}
|
||||
|
||||
exports.createBob = async function (image, port) {
|
||||
const substrateArgs = [
|
||||
'--chain=local',
|
||||
'--node-key',
|
||||
'0000000000000000000000000000000000000000000000000000000000000002',
|
||||
'--validator',
|
||||
'--bob',
|
||||
'--no-telemetry',
|
||||
'--rpc-cors',
|
||||
'all',
|
||||
'--bootnodes',
|
||||
getBootNodeUrl(this.config.bootnode)]
|
||||
const nodeSpec = {
|
||||
nodeId: 'bob',
|
||||
image,
|
||||
port,
|
||||
args: substrateArgs
|
||||
}
|
||||
nodeSpec.extraInfo = {
|
||||
nodeType: 'validator',
|
||||
privateKey: '',
|
||||
publicKey: ''
|
||||
}
|
||||
await this.createNode(nodeSpec)
|
||||
}
|
||||
|
||||
exports.createAliceBobNodes = async function (image, port) {
|
||||
await this.createAlice(image, port)
|
||||
await this.createBob(image, port)
|
||||
}
|
||||
|
||||
exports.createDevNode = async function (image, port) {
|
||||
const substrateArgs = ['--dev', '--rpc-external', '--ws-external']
|
||||
const nodeSpec = {
|
||||
nodeId: 'node-1',
|
||||
image,
|
||||
port,
|
||||
args: substrateArgs
|
||||
}
|
||||
await this.createNode(nodeSpec)
|
||||
}
|
||||
|
||||
exports.createNode = async function (nodeSpec) {
|
||||
logger.info(`Creating ${nodeSpec.nodeId} as ${nodeSpec.extraInfo ? nodeSpec.extraInfo.nodeType : 'FullNode'} in ${this.config.namespace}`)
|
||||
await k8s.createPod(nodeSpec, this.config.namespace)
|
||||
logger.debug('Polling pod status')
|
||||
const pod = await pollUntil(
|
||||
() => k8s.getPod(nodeSpec.nodeId, this.config.namespace)
|
||||
)
|
||||
const nodeInfo = {
|
||||
podName: nodeSpec.nodeId,
|
||||
ip: pod.status.podIP,
|
||||
port: nodeSpec.port
|
||||
}
|
||||
if (nodeSpec.extraInfo) {
|
||||
Object.assign(nodeInfo, nodeSpec.extraInfo)
|
||||
}
|
||||
logger.info(`${nodeSpec.nodeId} is created`)
|
||||
this.config.addNode(nodeInfo)
|
||||
}
|
||||
|
||||
exports.cleanup = async function (namespace) {
|
||||
await k8s.deleteNamespace(namespace)
|
||||
if (namespace === this.config.namespace) {
|
||||
this.config.reset()
|
||||
}
|
||||
}
|
||||
|
||||
exports.getPodInfoInConfig = function (namespace, podName) {
|
||||
if (this.config.namespace === namespace && Array.isArray(this.config.nodes)) {
|
||||
return this.config.nodes.find((node) => node.podName === podName)
|
||||
} else {
|
||||
throw Error('No pod present in the namespace in config')
|
||||
}
|
||||
}
|
||||
|
||||
exports.startForwardServer = async function (namespace, pod, port, onReady) {
|
||||
await k8s.startForwardServer(namespace, pod, port, onReady)
|
||||
}
|
||||
@@ -0,0 +1,4 @@
|
||||
const deployment = require('./deployment')
|
||||
module.exports = function (Hypervisor) {
|
||||
Object.assign(Hypervisor.prototype, deployment)
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
const CONFIG = require('../config')()

// Hypervisor: facade object for driving chaos-test deployments and chain
// queries. Falls back to the shared CONFIG instance when no config is given.
function Hypervisor (config) {
  this.config = config || CONFIG
}

// Mount sub modules of the Hypervisor class
// (each mixin assigns its exported functions onto Hypervisor.prototype).
require('./deployment')(Hypervisor)
require('./chainApi')(Hypervisor)

module.exports = Hypervisor
|
||||
@@ -0,0 +1,18 @@
|
||||
const { ApiPromise, WsProvider } = require('@polkadot/api')
|
||||
const { HttpProvider } = require('@polkadot/rpc-provider')
|
||||
|
||||
// Build a JSON-RPC provider for `url`.
// TODO (preserved from original): switch to the websocket-based ApiPromise
// once the polkadot/api version stabilises; plain HTTP provider is used for now.
const getApi = async (url) => {
  return new HttpProvider(url)
}
|
||||
|
||||
// Query the node's current best block via `chain_getBlock` and return its
// height as a decimal number (block numbers come back hex-encoded).
const getChainBlockHeight = async (provider) => {
  const response = await provider.send('chain_getBlock', [])
  return parseInt(response.block.header.number, 16)
}
|
||||
|
||||
module.exports = { getApi, getChainBlockHeight }
|
||||
@@ -0,0 +1,113 @@
|
||||
const k8s = require('@kubernetes/client-node')
|
||||
const { isFunction } = require('../../utils')
|
||||
const logger = require('../../utils/logger')
|
||||
|
||||
// load k8s
|
||||
const kc = new k8s.KubeConfig()
|
||||
kc.loadFromDefault()
|
||||
|
||||
// load k8s Apis
|
||||
const k8sAppApi = kc.makeApiClient(k8s.AppsV1Api)
|
||||
const k8sCoreApi = kc.makeApiClient(k8s.CoreV1Api)
|
||||
|
||||
// Create a v1 Namespace object with the given name.
const createNamespace = async namespace => {
  const body = {
    apiVersion: 'v1',
    kind: 'Namespace',
    metadata: { name: namespace }
  }
  return k8sCoreApi.createNamespace(body)
}
|
||||
|
||||
// Fetch an existing namespace; the API client rejects (404) when absent.
const readNamespace = async namespace => k8sCoreApi.readNamespace(namespace)
|
||||
|
||||
// Translate a nodeSpec into a single-container pod definition and create it
// in `namespace`. The container always pulls a fresh image.
const createPod = async (nodeSpec, namespace) => {
  const { label, nodeId, image, args, port } = nodeSpec
  const podSpec = {
    metadata: {
      labels: { app: label },
      name: nodeId
    },
    spec: {
      containers: [{
        image,
        imagePullPolicy: 'Always',
        name: nodeId,
        ports: [{ containerPort: port }],
        args
      }]
    }
  }
  return k8sCoreApi.createNamespacedPod(namespace, podSpec)
}
|
||||
|
||||
// Return the 'Available' condition from a deployment's status, or undefined
// when the status (or its conditions) is not populated yet.
const getDeploymentStatus = async (deploymentName, namespace) => {
  const res = await k8sAppApi.readNamespacedDeploymentStatus(deploymentName, namespace)
  const status = res.response.body.status
  if (!status || !status.conditions) {
    return undefined
  }
  return status.conditions.find((item) => item.type === 'Available')
}
|
||||
|
||||
// Delete the namespace (and everything inside it), unless the operator asked
// to keep it for post-mortem inspection via KEEP_NAMESPACE=1.
const deleteNamespace = async (namespace) => {
  logger.debug(`Taking down Namespace ${namespace}...`)
  // Environment variables are always strings, so compare against '1'.
  // The previous strict comparison against the number 1 could never be true,
  // meaning KEEP_NAMESPACE was silently ignored.
  if (process.env.KEEP_NAMESPACE === '1') {
    return
  }
  return k8sCoreApi.deleteNamespace(namespace)
}
|
||||
|
||||
// List all pods in the namespace and return just the item array.
const getNamespacedPods = async (namespace) => {
  const { body } = await k8sCoreApi.listNamespacedPod(namespace)
  return body.items
}
|
||||
|
||||
// Find a pod by name in the namespace. A pod only counts once it has been
// assigned an IP (status.podIP); throws when no such pod exists.
const getPod = async (podName, namespace) => {
  const pods = await getNamespacedPods(namespace)
  for (const pod of pods) {
    const named = !!pod.metadata && pod.metadata.name === podName
    if (named && !!pod.status && pod.status.podIP) {
      return pod
    }
  }
  throw Error(`GetNode(${podName}): node is not present in the cluster`)
}
|
||||
|
||||
// Start a local TCP server that tunnels every connection to `port` on the
// given pod — the JavaScript equivalent of `kubectl port-forward ...`.
// Resolves once the server is listening on 127.0.0.1:<port>; rejects if the
// server fails to bind. Calls the optional onReady hook after resolving.
const startForwardServer = async (namespace, pod, port, onReady) => new Promise((resolve, reject) => {
  const net = require('net')
  const forward = new k8s.PortForward(kc)

  // This simple server just forwards traffic from itself to a service running in kubernetes
  // -> localhost:8080 -> port-forward-tunnel -> kubernetes-pod
  // This is basically equivalent to 'kubectl port-forward ...' but in Javascript.
  const server = net.createServer((socket) => {
    forward.portForward(namespace, pod, [port], socket, null, socket)
  })

  // net.Server reports bind failures (e.g. port already in use) via the
  // 'error' event; the listen() callback receives NO error argument, so the
  // previous `if (err)` check there was dead code and failures went unnoticed.
  server.once('error', (err) => {
    logger.error('Error starting server')
    reject(err)
  })

  // TODO: add Ws proxy server to adopt the polkadot/api
  server.listen(port, '127.0.0.1', () => {
    logger.info('Forwarding server started, ready to connect')
    resolve()
    // Optional onReady hook when server started
    if (onReady && isFunction(onReady)) {
      onReady()
    }
  })
})
|
||||
|
||||
// Public surface of the Kubernetes helper module.
module.exports = { createNamespace, readNamespace, createPod, deleteNamespace, getDeploymentStatus, getPod, getNamespacedPods, startForwardServer }
|
||||
@@ -0,0 +1 @@
|
||||
// Thin re-export so local code depends on @oclif/command through one path.
module.exports = require('@oclif/command')
|
||||
@@ -0,0 +1,12 @@
|
||||
const logger = require('../utils/logger')
|
||||
|
||||
// Terminate the process signalling success.
const succeedExit = () => process.exit(0)
|
||||
|
||||
// Log the failure (message plus underlying error) and terminate the process
// with a non-zero exit code.
const errorExit = (message, error) => {
  logger.error(message, error)
  process.exit(1)
}
|
||||
|
||||
// Process-exit helpers shared by the CLI commands.
module.exports = { succeedExit, errorExit }
|
||||
@@ -0,0 +1,9 @@
|
||||
// Build the multiaddr of a bootnode from its IP and libp2p peer id
// (p2p port 30333 is fixed).
const getBootNodeUrl = ({ ip, peerId }) => `/dns4/${ip}/tcp/30333/p2p/${peerId}`
|
||||
|
||||
// True when `obj` is callable. `typeof` is the reliable check: the previous
// duck-typing (presence of constructor/call/apply properties) wrongly
// accepted plain objects that merely carry those property names.
const isFunction = (obj) => typeof obj === 'function'
|
||||
|
||||
// Miscellaneous utilities shared across the chaostest suite.
module.exports = { getBootNodeUrl, isFunction }
|
||||
@@ -0,0 +1,50 @@
|
||||
const winston = require('winston')
|
||||
const fs = require('fs')
|
||||
const logDir = 'log' // Or read from a configuration
|
||||
const { format, transports } = winston
|
||||
const env = process.env.NODE_ENV || 'development'
|
||||
const util = require('util')
|
||||
|
||||
// Ensure the log directory exists. `recursive: true` makes this a no-op when
// the directory is already present, avoiding the check-then-create race of
// the previous existsSync/mkdirSync pair.
fs.mkdirSync(logDir, { recursive: true })
|
||||
|
||||
// Render a record as "<timestamp> <level>: <message>", folding any captured
// metadata into the message text first.
const logFormat = format.printf(info => {
  info.message = util.format(info.message)
  const hasMetadata = info.metadata && Object.keys(info.metadata).length > 0
  if (hasMetadata) {
    info.message = util.format(info.message, info.metadata)
  }
  return `${info.timestamp} ${info.level}: ${info.message}`
})
|
||||
|
||||
// Winston logger: colorized console output (debug level in development,
// info otherwise), a JSON file sink capped at 10MB, and a dedicated file
// for uncaught exceptions. All file sinks now derive their path from
// logDir — previously the exceptions file hardcoded 'log/...' and would
// silently diverge if logDir changed (the resulting path is identical today).
const logger = winston.createLogger({
  level: env === 'development' ? 'debug' : 'info',
  transports: [
    new transports.Console({
      format: format.combine(
        format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
        // Collect everything except the core fields into `metadata`
        // so logFormat can append it to the rendered message.
        format.metadata({ fillExcept: ['message', 'level', 'timestamp', 'label'] }),
        format.colorize(),
        logFormat
      )
    }),
    new winston.transports.File({
      level: env === 'development' ? 'debug' : 'info',
      filename: `${logDir}/logs.log`,
      format: format.combine(
        format.timestamp(),
        format.json()
      ),
      maxsize: 1024 * 1024 * 10 // 10MB
    })
  ],
  exceptionHandlers: [
    new winston.transports.File({
      filename: `${logDir}/exceptions.log`
    })
  ]
})
|
||||
|
||||
// Shared, pre-configured winston logger instance.
module.exports = logger
|
||||
@@ -0,0 +1,32 @@
|
||||
const logger = require('./logger')
|
||||
/**
 * Resolve after roughly `n` milliseconds.
 *
 * @param n - Delay in milliseconds
 */
function waitNMilliseconds (n) {
  return new Promise((resolve) => setTimeout(resolve, n))
}
|
||||
|
||||
/**
 * Keep invoking `fn` until it resolves successfully, logging each failure
 * and sleeping 5 seconds between attempts.
 *
 * @param fn - The function to run
 */
async function pollUntil (fn) {
  while (true) {
    try {
      return await fn()
    } catch (_error) {
      logger.error('Error polling', _error)
      logger.debug('awaiting...')
      await waitNMilliseconds(5000) // FIXME We can add exponential delay here
    }
  }
}
|
||||
|
||||
// Async polling and timing helpers.
module.exports = { pollUntil, waitNMilliseconds }
|
||||
Reference in New Issue
Block a user