feat: Add rebrand CI/CD workflows to main branch
- Add 72 rebrand workflow files (polkadot→pezkuwi, substrate→bizinikiwi, cumulus→pezcumulus) - Add GitHub actions, issue templates, and configs - Removed unnecessary workflows (fork-sync, gitspiegel, upstream-tracker, sync-templates, backport) - Renamed zombienet test files to match new naming convention
This commit is contained in:
Executable
+70
@@ -0,0 +1,70 @@
|
||||
#!/usr/bin/env python3

# A script that checks each workspace crate individually.
# It's relevant to check workspace crates individually because otherwise their compilation problems
# due to feature misconfigurations won't be caught, as exemplified by
# https://github.com/paritytech/substrate/issues/12705
#
# `check-each-crate.py target_group groups_total`
#
# - `target_group`: Integer starting from 1, the group this script should execute.
# - `groups_total`: Integer starting from 1, total number of groups.

import subprocess, sys

# Get all crates in the workspace (depth 0 == workspace members only).
output = subprocess.check_output(["cargo", "tree", "--locked", "--workspace", "--depth", "0", "--prefix", "none"])

# Convert the output into a proper list of (name, path) tuples.
crates = []
for line in output.splitlines():
    if line != b"":
        parts = line.decode('utf8').split(" ")
        crate_name = parts[0]
        # The crate path is always the last element in the line, wrapped in parentheses.
        crate_path = parts[-1].replace("(", "").replace(")", "")
        crates.append((crate_name, crate_path))

# Make the list unique and sorted so the split into groups is deterministic across runners.
crates = sorted(set(crates))

target_group = int(sys.argv[1]) - 1
groups_total = int(sys.argv[2])
# Forklift is disabled by default since Pezkuwi doesn't have access to Parity's GCP infrastructure
disable_forklift = True

print(f"Target group: {target_group}, Total groups: {groups_total}, Disable forklift: {disable_forklift}", file=sys.stderr)

if len(crates) == 0:
    print("No crates detected!", file=sys.stderr)
    sys.exit(1)

print(f"Total crates: {len(crates)}", file=sys.stderr)

crates_per_group = len(crates) // groups_total

# If this is the last runner, we need to take care of crates
# after the group that we lost because of the integer division.
if target_group + 1 == groups_total:
    overflow_crates = len(crates) % groups_total
else:
    overflow_crates = 0

print(f"Crates per group: {crates_per_group}", file=sys.stderr)

# Check each crate in this runner's slice of the sorted crate list.
for i in range(crates_per_group + overflow_crates):
    crate = crates_per_group * target_group + i

    print(f"Checking {crates[crate][0]}", file=sys.stderr)

    cmd = ["cargo", "check", "--locked"]
    # Prefix the command with the forklift wrapper only when it is enabled
    # (was a conditional expression used for its side effect).
    if not disable_forklift:
        cmd.insert(0, "forklift")

    res = subprocess.run(cmd, cwd=crates[crate][1])

    if res.returncode != 0:
        sys.exit(1)
|
||||
+36
@@ -0,0 +1,36 @@
|
||||
#!/usr/bin/env bash
echo "Running script relative to $(pwd)"
# Find all README.docify.md files
DOCIFY_FILES=$(find . -name "README.docify.md")

# Track the directories whose README.md needs to be regenerated
NEED_REGENERATION=""

for file in $DOCIFY_FILES; do
    echo "Processing $file"

    # Get the directory containing the docify file
    DIR=$(dirname "$file")

    # Go to the directory and check the readme feature compiles.
    # BUG FIX: the backticks around generate-readme must be escaped —
    # unescaped backticks inside a double-quoted string are command
    # substitution and the shell would try to run `generate-readme`.
    cd "$DIR" || exit 1
    cargo check --features generate-readme || { echo "Readme generation for $DIR failed. Ensure the crate compiles successfully and has a \`generate-readme\` feature which guards markdown compilation in the crate as follows: https://docs.rs/docify/latest/docify/macro.compile_markdown.html#conventions." && exit 1; }

    # Check if README.md has any uncommitted changes
    if ! git diff --exit-code README.md; then
        echo "Error: Found uncommitted changes in $DIR/README.md"
        NEED_REGENERATION="$NEED_REGENERATION $DIR"
    fi

    # Return to the original directory
    cd - > /dev/null || exit 1
done

# Fail if any directory needs README regeneration
if [ -n "$NEED_REGENERATION" ]; then
    echo "The following directories need README regeneration:"
    echo "$NEED_REGENERATION"
    exit 1
fi
|
||||
@@ -0,0 +1,71 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
'''
|
||||
Ensure that the prdoc files are valid.
|
||||
|
||||
# Example
|
||||
|
||||
```sh
|
||||
python3 -m pip install cargo-workspace
|
||||
python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/*.prdoc
|
||||
```
|
||||
|
||||
Produces example output:
|
||||
```pre
|
||||
🔎 Reading workspace pezkuwi-sdk/Cargo.toml
|
||||
📦 Checking 32 prdocs against 493 crates.
|
||||
✅ All prdocs are valid
|
||||
```
|
||||
'''
|
||||
|
||||
import os
|
||||
import yaml
|
||||
import argparse
|
||||
import cargo_workspace
|
||||
|
||||
def check_prdoc_crate_names(root, paths):
    '''
    Check that every crate listed in the `crates` section of each prdoc
    is present in the workspace rooted at `root`.

    Exits the process with status 1 when any prdoc references an unknown crate.
    '''
    print(f'🔎 Reading workspace {root}.')
    workspace = cargo_workspace.Workspace.from_path(root)
    crate_names = [crate.name for crate in workspace.crates]

    print(f'📦 Checking {len(paths)} prdocs against {len(crate_names)} crates.')
    faulty = {}

    for path in paths:
        with open(path, 'r') as f:
            prdoc = yaml.safe_load(f)

        # Collect every referenced crate that is not a workspace member.
        unknown = [entry['name'] for entry in prdoc.get('crates', []) if entry['name'] not in crate_names]
        if unknown:
            faulty[path] = unknown

    if not faulty:
        print('✅ All prdocs are valid.')
        return

    print('❌ Some prdocs are invalid.')
    for path, crates in faulty.items():
        print(f'💥 {path} lists invalid crate: {", ".join(crates)}')
    exit(1)
|
||||
|
||||
def parse_args():
    '''Parse CLI arguments: one workspace manifest followed by the prdoc files to check.'''
    parser = argparse.ArgumentParser(description='Check prdoc files')
    parser.add_argument('root', help='The cargo workspace manifest', metavar='root', type=str, nargs=1)
    parser.add_argument('prdoc', help='The prdoc files', metavar='prdoc', type=str, nargs='*')
    args = parser.parse_args()

    # `nargs='*'` accepts zero files, so enforce "at least one" by hand.
    if not args.prdoc:
        print('❌ Need at least one prdoc file as argument.')
        exit(1)

    return {'root': os.path.abspath(args.root[0]), 'prdocs': args.prdoc}
|
||||
|
||||
# Script entry point: parse CLI arguments, then validate every prdoc file
# against the crates present in the workspace manifest.
if __name__ == '__main__':
    args = parse_args()
    check_prdoc_crate_names(args['root'], args['prdocs'])
|
||||
Executable
+124
@@ -0,0 +1,124 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import json
|
||||
import sys
|
||||
import logging
|
||||
import os
|
||||
|
||||
|
||||
def check_constant(spec_pallet_id, spec_pallet_value, meta_constant):
    """
    Check a single constant against its expected spec value.

    :param spec_pallet_id: Pallet name, used for logging only.
    :param spec_pallet_value: Mapping with one entry: constant name -> {"value": ...}.
    :param meta_constant: Constant entry from the metadata ("name"/"value" keys).
    :return: True/False when the names match; None when this metadata constant
             is not the one the spec entry refers to (callers filter out None).
    """
    expected_name = list(spec_pallet_value.keys())[0]
    if meta_constant['name'] != expected_name:
        # Not the constant this spec entry targets — signal "skip" with None.
        return None

    constant = meta_constant['name']
    res = list(spec_pallet_value.values())[0]["value"] == meta_constant["value"]

    logging.debug(f" Checking pallet:{spec_pallet_id}/constants/{constant}")
    logging.debug(f" spec_pallet_value: {spec_pallet_value}")
    logging.debug(f" meta_constant: {meta_constant}")
    logging.info(f"pallet:{spec_pallet_id}/constants/{constant} -> {res}")
    return res
|
||||
|
||||
|
||||
def check_pallet(metadata, spec_pallet):
    """
    Check all expected constants of one pallet against the metadata.

    :param metadata: Decoded metadata JSON containing a "pallets" mapping.
    :param spec_pallet: Tuple of (pallet id, spec entry with a "constants" mapping).
    :return: True when every matching constant check passed.
    """
    spec_pallet_id, spec_pallet_value = spec_pallet
    logging.debug(f"Pallet: {spec_pallet_id}")

    metadata_pallet = metadata["pallets"][spec_pallet_id]

    # check_constant returns None for constants the spec entry does not refer
    # to; drop those so they do not count as failures.
    results = [
        check_constant(spec_pallet_id, spec_pallet_value["constants"], meta_constant_value)
        for meta_constant_value in metadata_pallet["constants"].values()
    ]
    return all(r for r in results if r is not None)
|
||||
|
||||
|
||||
def check_pallets(metadata, specs):
    """
    Check all pallets listed in the specs against the metadata.

    :param metadata: Decoded metadata JSON.
    :param specs: Spec JSON with a "pallets" mapping.
    :return: True when every pallet check passed.
    """
    results = [check_pallet(metadata, spec_pallet) for spec_pallet in specs['pallets'].items()]
    # Drop None results defensively (check_pallet normally returns a bool).
    return all(r for r in results if r is not None)
|
||||
|
||||
|
||||
def check_metadata(metadata, specs):
    """
    Check metadata (json) against a list of expectations.

    :param metadata: Metadata in JSON format.
    :param specs: Expectations (spec JSON).
    :return: Bool — True when all pallet checks pass.
    """
    return check_pallets(metadata, specs)
|
||||
|
||||
|
||||
def help():
    """Print simple usage information for this script.

    NOTE(review): the name shadows the `help` builtin; it is kept because
    callers in this file invoke it by this name.
    """
    print(f"You must pass 2 args, you passed {len(sys.argv) - 1}")
    print("Sample call:")
    print("check-runtime.py <metadata.json> <specs.json>")
|
||||
|
||||
|
||||
def load_json(file):
    """Load and decode a JSON file.

    :param file: Path of the JSON file to read.
    :return: The decoded Python object.
    """
    # Use a context manager so the handle is closed even when decoding fails
    # (the original left the file open).
    with open(file) as f:
        return json.load(f)
|
||||
|
||||
|
||||
def main():
    """Entry point: compare a metadata dump against a spec file given on the CLI."""
    # The log level is configurable via the LOGLEVEL env var (default INFO).
    logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO').upper())

    if len(sys.argv) != 3:
        help()
        exit(1)

    metadata_file, specs_file = sys.argv[1], sys.argv[2]
    print(f"Checking metadata from: {metadata_file} with specs from: {specs_file}")

    metadata = load_json(metadata_file)
    specs = load_json(specs_file)

    if check_metadata(metadata, specs):
        logging.info(f"OK")
        exit(0)

    print("")
    logging.info(f"Some errors were found, run again with LOGLEVEL=debug")
    exit(1)


if __name__ == "__main__":
    main()
|
||||
@@ -0,0 +1,179 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Ensures that:
|
||||
# - all crates are added to the root workspace
|
||||
# - local dependencies are resolved via `path`
|
||||
#
|
||||
# It does not check that the local paths resolve to the correct crate. This is already done by cargo.
|
||||
#
|
||||
# Must be called with a folder containing a `Cargo.toml` workspace file.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import toml
|
||||
import argparse
|
||||
|
||||
def parse_args():
    '''Parse CLI arguments: the workspace directory plus optional excluded crate paths.'''
    parser = argparse.ArgumentParser(description='Check Rust workspace integrity.')
    parser.add_argument('workspace_dir', help='The directory to check', metavar='workspace_dir', type=str, nargs=1)
    parser.add_argument('--exclude', help='Exclude crate paths from the check', metavar='exclude', type=str, nargs='*', default=[])
    parsed = parser.parse_args()
    return (parsed.workspace_dir[0], parsed.exclude)
|
||||
|
||||
def main(root, exclude):
    '''Run all workspace-integrity checks for the workspace at `root`.'''
    workspace_crates = get_members(root, exclude)
    all_crates = get_crates(root, exclude)
    print(f'📦 Found {len(all_crates)} crates in total')

    # Each check terminates the process with status 1 on failure.
    check_duplicates(workspace_crates)
    check_missing(workspace_crates, all_crates)
    check_links(all_crates)
|
||||
|
||||
def get_members(workspace_dir, exclude):
    '''
    Extract all members from a workspace manifest.

    :param workspace_dir: Directory containing the root `Cargo.toml`.
    :param exclude: Member paths that must NOT appear in the workspace.
    :return: List of all workspace member paths.
    '''
    print(f'🔎 Indexing workspace {os.path.abspath(workspace_dir)}')

    root_manifest_path = os.path.join(workspace_dir, "Cargo.toml")
    if not os.path.exists(root_manifest_path):
        # BUG FIX: the original referenced the undefined name `root_manifest`
        # here, which raised a NameError instead of printing the error.
        print(f'❌ No root manifest found at {root_manifest_path}')
        sys.exit(1)

    root_manifest = toml.load(root_manifest_path)
    if 'workspace' not in root_manifest:
        print(f'❌ No workspace found in root {root_manifest_path}')
        sys.exit(1)

    if 'members' not in root_manifest['workspace']:
        return []

    members = []
    for member in root_manifest['workspace']['members']:
        if member in exclude:
            print(f'❌ Excluded member should not appear in the workspace {member}')
            sys.exit(1)
        members.append(member)

    return members
|
||||
|
||||
# List all members of the workspace.
# Return: Map name -> (path, manifest)
def get_crates(workspace_dir, exclude_crates) -> dict:
    crates = {}

    for root, _dirs, files in os.walk(workspace_dir):
        # Skip build output. NOTE(review): this is a substring match on the
        # whole path, so any directory with "target" anywhere in its name is
        # skipped too — confirm that is acceptable.
        if "target" in root:
            continue
        for file in files:
            if file != "Cargo.toml":
                continue

            path = os.path.join(root, file)
            with open(path, "r") as f:
                content = f.read()
            manifest = toml.loads(content)

            # A `[workspace]` table marks a (possibly virtual) workspace
            # manifest; only the root itself may contain one, anything nested
            # is a recursive workspace and is skipped.
            if 'workspace' in manifest:
                if root != workspace_dir:
                    print("⏩ Excluded recursive workspace at %s" % path)
                continue

            # Cut off the root path and the trailing /Cargo.toml.
            # (11 == len("/Cargo.toml"); assumes workspace_dir has no trailing
            # slash — TODO confirm callers guarantee this.)
            path = path[len(workspace_dir)+1:-11]
            name = manifest['package']['name']
            if path in exclude_crates:
                print("⏩ Excluded crate %s at %s" % (name, path))
                continue
            crates[name] = (path, manifest)

    return crates
|
||||
|
||||
# Check that there are no duplicate entries in the workspace.
def check_duplicates(workspace_crates):
    print(f'🔎 Checking for duplicate crates')
    seen = set()
    for path in workspace_crates:
        if path in seen:
            print(f'❌ crate is listed twice in the workspace {path}')
            sys.exit(1)
        seen.add(path)
|
||||
|
||||
# Check that all crates are in the workspace.
def check_missing(workspace_crates, all_crates):
    print(f'🔎 Checking for missing crates')
    if len(workspace_crates) == len(all_crates):
        print(f'✅ All {len(all_crates)} crates are in the workspace')
        return

    # Collect every crate whose path is not a workspace member; sort for
    # stable, readable output.
    missing = sorted(
        [name, path, manifest]
        for name, (path, manifest) in all_crates.items()
        if path not in workspace_crates
    )

    for name, path, _manifest in missing:
        print("❌ %s in %s" % (name, path))
    print(f'😱 {len(all_crates) - len(workspace_crates)} crates are missing from the workspace')
    sys.exit(1)
|
||||
|
||||
# Check that all local dependencies are good.
# Walks every crate manifest, records each internal dependency edge in
# `links`, and collects edges that violate the inheritance rules in `broken`.
# Exits with status 1 when any link is broken.
def check_links(all_crates):
    print(f'🔎 Checking for broken dependency links')
    links = []
    broken = []

    for name, (_path, manifest) in all_crates.items():
        # Closure over `name`, `links` and `broken`; validates one
        # dependency table ({dep-name: spec, ...}).
        def check_deps(deps):
            for dep in deps:
                # Could be renamed:
                dep_name = dep
                if 'package' in deps[dep]:
                    dep_name = deps[dep]['package']
                if dep_name in all_crates:
                    links.append((name, dep_name))

                    # For pezkuwi-sdk umbrella crate: accept both path and workspace inheritance
                    # For all other crates: require workspace inheritance
                    if name == 'pezkuwi-sdk':
                        has_path = 'path' in deps[dep]
                        has_workspace = 'workspace' in deps[dep] and deps[dep]['workspace']
                        if not has_path and not has_workspace:
                            broken.append((name, dep_name, "crate must use path or workspace inheritance"))
                            # NOTE(review): this `return` stops after the FIRST
                            # broken dep of a table, so later broken deps in the
                            # same table go unreported (the overall check still
                            # fails) — confirm this is intentional.
                            return
                    elif not 'workspace' in deps[dep] or not deps[dep]['workspace']:
                        broken.append((name, dep_name, "crate must use workspace inheritance"))
                        return

        # Validates all three dependency sections of one manifest-like table.
        def check_crate(deps):
            to_checks = ['dependencies', 'dev-dependencies', 'build-dependencies']

            for to_check in to_checks:
                if to_check in deps:
                    check_deps(deps[to_check])

        # There could possibly target dependant deps:
        if 'target' in manifest:
            # Target dependant deps can only have one level of nesting:
            for _, target in manifest['target'].items():
                check_crate(target)

        check_crate(manifest)

    links.sort()
    broken.sort()

    if len(broken) > 0:
        for (l, r, reason) in broken:
            print(f'❌ {l} -> {r} ({reason})')

        print("💥 %d out of %d links are broken" % (len(broken), len(links)))
        sys.exit(1)
    else:
        print("✅ All %d internal dependency links are correct" % len(links))
|
||||
# Script entry point: parse (workspace_dir, exclude) from the CLI and run all checks.
if __name__ == "__main__":
    args = parse_args()
    main(args[0], args[1])
|
||||
+93
@@ -0,0 +1,93 @@
|
||||
#!/usr/bin/env bash

# Validates the .github/zombienet-flaky-tests file to ensure:
# 1. Each entry has the correct format: <test-name>:<issue-number>
# 2. The referenced number is a GitHub Issue
# 3. The GitHub issue exists
# 4. The issue is OPEN (warns if closed)

set -uo pipefail

FLAKY_TESTS_FILE="${1:-.github/zombienet-flaky-tests}"

if [[ ! -f "$FLAKY_TESTS_FILE" ]]; then
    echo "Error: File not found: $FLAKY_TESTS_FILE" >&2
    exit 1
fi

if ! command -v gh &> /dev/null; then
    echo "Error: gh CLI is not installed" >&2
    exit 1
fi

echo "Validating $FLAKY_TESTS_FILE..."
echo

has_errors=false
line_num=0

while IFS= read -r line || [[ -n "$line" ]]; do
    line_num=$((line_num + 1))

    # Skip blank lines.
    if [[ -z "$line" ]]; then
        continue
    fi

    # Parse format: test-name:issue-number
    if [[ ! "$line" =~ ^([^:]+):([0-9]+)$ ]]; then
        echo "❌ Line $line_num: Missing required issue number" >&2
        echo "   Entry: '$line'" >&2
        echo "   Expected format: <test-name>:<issue-number>" >&2
        echo "   Example: zombienet-pezkuwi-test-name:1234" >&2
        has_errors=true
        continue
    fi

    test_name="${BASH_REMATCH[1]}"
    issue_number="${BASH_REMATCH[2]}"

    # BUG FIX: the original wrapped this call in `set +e` / `set -e`, which
    # *enabled* errexit for the remainder of a script that was started
    # without it. Capture the exit status with `if !` instead.
    if ! issue_data=$(gh issue view "$issue_number" --json state,title,url 2>&1); then
        echo "❌ Line $line_num: Issue #$issue_number does not exist" >&2
        echo "   Test: $test_name" >&2
        has_errors=true
        continue
    fi

    url=$(echo "$issue_data" | jq -r '.url')
    state=$(echo "$issue_data" | jq -r '.state')
    title=$(echo "$issue_data" | jq -r '.title')

    # Check if it's an issue (not a PR) by verifying the URL contains '/issues/'
    if [[ ! "$url" =~ /issues/ ]]; then
        echo "❌ Line $line_num: #$issue_number is a Pull Request, not an Issue" >&2
        echo "   Test: $test_name" >&2
        echo "   URL: $url" >&2
        echo "   Please reference a GitHub Issue, not a PR" >&2
        has_errors=true
        continue
    fi

    if [[ "$state" == "OPEN" ]]; then
        echo "✅ Line $line_num: $test_name -> Issue #$issue_number (open)"
    else
        echo "⚠️  Line $line_num: Issue #$issue_number is closed: '$title'" >&2
        echo "   Test: $test_name" >&2
        echo "   Consider removing this entry if the issue is resolved." >&2
    fi

done < "$FLAKY_TESTS_FILE"

echo

if [[ "$has_errors" == "true" ]]; then
    echo "❌ Validation failed with errors" >&2
    exit 1
else
    echo "✅ All entries are valid"
    exit 0
fi
|
||||
@@ -0,0 +1,62 @@
|
||||
# Command Bot Documentation
|
||||
|
||||
The command bot allows contributors to perform self-service actions on PRs using comment commands.
|
||||
|
||||
## Available Commands
|
||||
|
||||
### Label Command (Self-service)
|
||||
|
||||
Add labels to your PR without requiring maintainer intervention:
|
||||
|
||||
```bash
|
||||
/cmd label T1-FRAME # Add single label
|
||||
/cmd label T1-FRAME R0-no-crate-publish-required # Add multiple labels
|
||||
/cmd label T1-FRAME A2-substantial D3-involved # Add multiple labels
|
||||
```
|
||||
|
||||
**Available Labels:**
|
||||
The bot dynamically fetches all current labels from the repository, ensuring it's always up-to-date. For label meanings and descriptions, see the [official label documentation](https://docs.pezkuwichain.io/labels/doc_pezkuwi-sdk.html).
|
||||
|
||||
**Features**:
|
||||
- **Auto-Correction**: Automatically fixes high-confidence typos (e.g., `T1-FRAM` → `T1-FRAME`)
|
||||
- **Case Fixing**: Handles case variations (e.g., `I2-Bug` → `I2-bug`)
|
||||
- **Smart Suggestions**: For ambiguous inputs, provides multiple options to choose from
|
||||
|
||||
### Other Commands
|
||||
|
||||
```bash
|
||||
/cmd fmt # Format code (cargo +nightly fmt and taplo)
|
||||
/cmd prdoc # Generate PR documentation
|
||||
/cmd bench # Run benchmarks
|
||||
/cmd update-ui # Update UI tests
|
||||
/cmd --help # Show help for all commands
|
||||
```
|
||||
|
||||
### Common Flags
|
||||
|
||||
- `--quiet`: Don't post start/end messages in PR
|
||||
- `--clean`: Clean up previous bot comments
|
||||
- `--image <image>`: Override docker image
|
||||
|
||||
## How It Works
|
||||
|
||||
1. **Command Detection**: The bot listens for comments starting with `/cmd` on PRs
|
||||
2. **Permission Check**: Verifies if the user is an organization member
|
||||
3. **Command Execution**: Runs the specified command in a containerized environment
|
||||
4. **Result Handling**:
|
||||
- For label commands: Applies labels via GitHub API
|
||||
- For other commands: Commits changes back to the PR branch
|
||||
5. **Feedback**: Posts success/failure messages in the PR
|
||||
|
||||
## Security
|
||||
|
||||
- Organization member check prevents unauthorized usage
|
||||
- Commands from non-members run using bot scripts from master branch
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If a command fails:
|
||||
1. Check the GitHub Actions logs linked in the bot's comment
|
||||
2. Verify the command syntax matches the examples
|
||||
3. Ensure you have permission to perform the action
|
||||
4. For label commands, verify the label names are in the allowed list
|
||||
@@ -0,0 +1,26 @@
|
||||
import argparse
|
||||
|
||||
"""
|
||||
|
||||
Custom help action for argparse, it prints the help message for the main parser and all subparsers.
|
||||
|
||||
"""
|
||||
|
||||
|
||||
class _HelpAction(argparse._HelpAction):
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
parser.print_help()
|
||||
|
||||
# retrieve subparsers from parser
|
||||
subparsers_actions = [
|
||||
action for action in parser._actions
|
||||
if isinstance(action, argparse._SubParsersAction)]
|
||||
# there will probably only be one subparser_action,
|
||||
# but better save than sorry
|
||||
for subparsers_action in subparsers_actions:
|
||||
# get all subparsers and print help
|
||||
for choice, subparser in subparsers_action.choices.items():
|
||||
print("\n### Command '{}'".format(choice))
|
||||
print(subparser.format_help())
|
||||
|
||||
parser.exit()
|
||||
Executable
+565
@@ -0,0 +1,565 @@
|
||||
#!/usr/bin/env python3

import os
import sys
import json
import argparse
import _help
import importlib.util
import re
import urllib.request
import urllib.parse
import difflib

_HelpAction = _help._HelpAction

# Load the runtime matrix that drives the --runtime choices below.
# Use a context manager so the handle is closed (the original leaked it).
with open('.github/workflows/runtimes-matrix.json', 'r') as f:
    runtimesMatrix = json.load(f)

runtimeNames = [runtime['name'] for runtime in runtimesMatrix]

# Flags shared by the main parser and every subcommand.
common_args = {
    '--quiet': {"action": "store_true", "help": "Won't print start/end/failed messages in PR"},
    '--clean': {"action": "store_true", "help": "Clean up the previous bot's & author's comments in PR"},
    '--image': {"help": "Override docker image '--image docker.io/paritytech/ci-unified:latest'"},
}
|
||||
|
||||
def print_and_log(message, output_file='/tmp/cmd/command_output.log'):
    """Echo `message` to stdout and append it to the command output log."""
    print(message)
    with open(output_file, 'a') as log:
        log.write(message + '\n')
|
||||
|
||||
def setup_logging():
    """Create /tmp/cmd and truncate the command output log.

    Uses `exist_ok=True` instead of the racy exists()+makedirs pair, and
    closes the file handle that the original left open.
    """
    os.makedirs('/tmp/cmd', exist_ok=True)
    with open('/tmp/cmd/command_output.log', 'w'):
        pass
|
||||
|
||||
def fetch_repo_labels():
    """Fetch current labels from the GitHub repository.

    Returns a list of label names, or None when the API call fails.
    """
    try:
        # Resolve the repository from the Actions environment, falling back
        # to the canonical repo.
        repo_owner = os.environ.get('GITHUB_REPOSITORY_OWNER', 'pezkuwichain')
        repo_name = os.environ.get('GITHUB_REPOSITORY', 'pezkuwichain/pezkuwi-sdk').split('/')[-1]

        api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/labels?per_page=100"

        # Add GitHub token if available for higher rate limits
        headers = {'User-Agent': 'pezkuwi-sdk-cmd-bot'}
        github_token = os.environ.get('GITHUB_TOKEN')
        if github_token:
            headers['Authorization'] = f'token {github_token}'

        req = urllib.request.Request(api_url, headers=headers)

        with urllib.request.urlopen(req) as response:
            if response.getcode() != 200:
                print_and_log(f"Failed to fetch labels: HTTP {response.getcode()}")
                return None
            labels_data = json.loads(response.read().decode())
            label_names = [label['name'] for label in labels_data]
            print_and_log(f"Fetched {len(label_names)} labels from repository")
            return label_names
    except Exception as e:
        print_and_log(f"Error fetching labels from repository: {e}")
        return None
|
||||
|
||||
|
||||
def check_pr_status(pr_number):
    """Return True when the PR is open, unmerged and not queued for merge.

    Any failure to determine the status returns False so that labeling is
    blocked rather than applied blindly.
    """
    try:
        github_token = os.environ.get('GITHUB_TOKEN')
        if not github_token:
            print_and_log("Error: GITHUB_TOKEN not set, cannot verify PR status")
            return False  # Prevent labeling if we can't check status

        repo_owner = os.environ.get('GITHUB_REPOSITORY_OWNER', 'pezkuwichain')
        repo_name = os.environ.get('GITHUB_REPOSITORY', 'pezkuwichain/pezkuwi-sdk').split('/')[-1]
        api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/pulls/{pr_number}"

        headers = {
            'User-Agent': 'pezkuwi-sdk-cmd-bot',
            'Authorization': f'token {github_token}',
            'Accept': 'application/vnd.github.v3+json'
        }

        req = urllib.request.Request(api_url, headers=headers)

        with urllib.request.urlopen(req) as response:
            if response.getcode() != 200:
                print_and_log(f"Failed to fetch PR status: HTTP {response.getcode()}")
                return False  # Prevent labeling if we can't check status

            data = json.loads(response.read().decode())
            merged = data.get('merged', False)
            closed = data.get('state') == 'closed'
            # auto_merge being set means the PR sits in the merge queue.
            queued = data.get('auto_merge') is not None
            return not (merged or closed or queued)
    except Exception as e:
        print_and_log(f"Error checking PR status: {e}")
        return False  # Prevent labeling if we can't check status
|
||||
|
||||
|
||||
def find_closest_labels(invalid_label, valid_labels, max_suggestions=3, cutoff=0.6):
    """Return up to `max_suggestions` labels fuzzily matching `invalid_label`."""
    return difflib.get_close_matches(invalid_label, valid_labels, n=max_suggestions, cutoff=cutoff)


def auto_correct_labels(invalid_labels, valid_labels, auto_correct_threshold=0.8):
    """Automatically correct labels when confidence is high, otherwise suggest.

    Returns (corrections, suggestions): `corrections` is a list of
    (original, corrected) tuples; `suggestions` is a list of human-readable hints.
    """
    corrections = []
    suggestions = []

    for invalid_label in invalid_labels:
        closest = find_closest_labels(invalid_label, valid_labels, max_suggestions=1)

        if closest:
            top_match = closest[0]
            similarity = difflib.SequenceMatcher(None, invalid_label.lower(), top_match.lower()).ratio()
            if similarity >= auto_correct_threshold:
                # High confidence — auto-correct silently.
                corrections.append((invalid_label, top_match))
                continue

            # Lower confidence — offer up to three alternatives instead.
            all_matches = find_closest_labels(invalid_label, valid_labels, max_suggestions=3)
            if all_matches:
                labels_str = ', '.join(f"'{label}'" for label in all_matches)
                suggestions.append(f"'{invalid_label}' → did you mean: {labels_str}?")
            else:
                suggestions.append(f"'{invalid_label}' → no close matches found")
            continue

        # No fuzzy match at all — fall back to matching on the label prefix
        # (e.g. 'T1-', 'I2-').
        prefix_match = re.match(r'^([A-Z]\d+)-', invalid_label)
        if not prefix_match:
            suggestions.append(f"'{invalid_label}' → invalid format (expected format: 'T1-FRAME', 'I2-bug', etc.)")
            continue

        prefix = prefix_match.group(1)
        prefix_labels = [label for label in valid_labels if label.startswith(prefix + '-')]
        if not prefix_labels:
            suggestions.append(f"'{invalid_label}' → no labels found with prefix '{prefix}-'")
        elif len(prefix_labels) == 1:
            # A unique prefix match is safe to auto-correct.
            corrections.append((invalid_label, prefix_labels[0]))
        else:
            suggestions.append(f"'{invalid_label}' → try labels starting with '{prefix}-': {', '.join(prefix_labels[:3])}")

    return corrections, suggestions
|
||||
|
||||
# Top-level CLI definition for the /cmd bot. Help is custom (_HelpAction also
# prints every subcommand's help), so the built-in -h/--help is disabled.
parser = argparse.ArgumentParser(prog="/cmd ", description='A command runner for pezkuwi-sdk repo', add_help=False)
parser.add_argument('--help', action=_HelpAction, help='help for help if you need some help') # help for help
# The shared flags are registered on the main parser AND on every subcommand
# below, so they may appear before or after the subcommand name.
for arg, config in common_args.items():
    parser.add_argument(arg, **config)

subparsers = parser.add_subparsers(help='a command to run', dest='command')

# Prepare /tmp/cmd/command_output.log before any subcommand writes to it.
setup_logging()

"""
BENCH
"""

# Epilog shown verbatim (RawDescriptionHelpFormatter) in `bench --help`.
bench_example = '''**Examples**:
 Runs all benchmarks
 %(prog)s

 Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it to output nothing to PR but reactions
 %(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet

 Runs bench for all pallets for zagros runtime and fails fast on first failed benchmark
 %(prog)s --runtime zagros --fail-fast

 Does not output anything and cleans up the previous bot's & author command triggering comments in PR
 %(prog)s --runtime zagros pezkuwichain --pallet pallet_balances pallet_multisig --quiet --clean
'''

parser_bench = subparsers.add_parser('bench', aliases=['bench-omni'], help='Runs benchmarks (frame omni bencher)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)

for arg, config in common_args.items():
    parser_bench.add_argument(arg, **config)

# Defaults to ALL runtimes from runtimes-matrix.json when --runtime is omitted.
parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames)
parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')


"""
FMT
"""
parser_fmt = subparsers.add_parser('fmt', help='Formats code (cargo +nightly-VERSION fmt) and configs (taplo format)')
for arg, config in common_args.items():
    parser_fmt.add_argument(arg, **config)

"""
Update UI
"""
parser_ui = subparsers.add_parser('update-ui', help='Updates UI tests')
for arg, config in common_args.items():
    parser_ui.add_argument(arg, **config)

"""
PRDOC
"""
# Import generate-prdoc.py dynamically — the hyphen in the file name makes a
# regular `import` impossible, so load it via importlib from its path.
spec = importlib.util.spec_from_file_location("generate_prdoc", ".github/scripts/generate-prdoc.py")
generate_prdoc = importlib.util.module_from_spec(spec)
spec.loader.exec_module(generate_prdoc)

parser_prdoc = subparsers.add_parser('prdoc', help='Generates PR documentation')
# The prdoc module owns its own flag definitions for this subcommand.
generate_prdoc.setup_parser(parser_prdoc, pr_required=False)

"""
LABEL
"""
|
||||
# Fetch current labels from repository
def get_allowed_labels():
    """Return the current list of allowed labels.

    Raises RuntimeError when the labels cannot be fetched — labeling must not
    proceed against a stale or unknown label set.
    """
    repo_labels = fetch_repo_labels()
    if repo_labels is None:
        # Fail if API fetch fails
        raise RuntimeError("Failed to fetch labels from repository. Please check your connection and try again.")
    return repo_labels
|
||||
|
||||
def validate_and_auto_correct_labels(input_labels, valid_labels):
    """Validate *input_labels* against *valid_labels*, auto-correcting confident typos.

    Returns:
        (final_labels, correction_messages): the accepted/corrected labels and
        one human-readable message per auto-correction performed.

    Raises:
        ValueError: when one or more labels can neither be accepted nor
        auto-corrected; the message aggregates every problem at once.
    """
    accepted = []
    messages = []
    pending_suggestions = []   # low-confidence matches needing manual selection
    unmatched = []             # labels with no candidates at all

    for candidate in input_labels:
        if candidate in valid_labels:
            accepted.append(candidate)
            continue
        # Unknown label: ask the corrector for a confident fix or, failing
        # that, a list of suggestions for the user to choose from.
        corrections, suggestions = auto_correct_labels([candidate], valid_labels)
        if corrections:
            original, corrected = corrections[0]
            accepted.append(corrected)
            similarity = difflib.SequenceMatcher(None, original.lower(), corrected.lower()).ratio()
            messages.append(f"Auto-corrected '{original}' → '{corrected}' (similarity: {similarity:.2f})")
        elif suggestions:
            pending_suggestions.extend(suggestions)
        else:
            unmatched.append(candidate)

    # Report every unresolved label in a single error instead of failing one at a time.
    if pending_suggestions or unmatched:
        parts = []
        if pending_suggestions:
            parts.append("Labels requiring manual selection:")
            parts.extend(f"  • {suggestion}" for suggestion in pending_suggestions)
        if unmatched:
            if pending_suggestions:
                parts.append("")  # blank line separating the two sections
            parts.append("Labels with no close matches:")
            parts.extend(f"  • '{label}' → no valid suggestions available" for label in unmatched)
        parts.append("")
        parts.append("For all available labels, see: https://docs.pezkuwichain.io/labels/doc_pezkuwi-sdk.html")
        raise ValueError("\n".join(parts))

    return accepted, messages
|
||||
|
||||
# Epilog text shown verbatim under `--help` for the label sub-command.
label_example = '''**Examples**:
Add single label
%(prog)s T1-FRAME
Add multiple labels
%(prog)s T1-FRAME R0-no-crate-publish-required
Add multiple labels
%(prog)s T1-FRAME A2-substantial D3-involved
Labels are fetched dynamically from the repository.
Typos are auto-corrected when confidence is high (>80% similarity).
For label meanings, see: https://docs.pezkuwichain.io/labels/doc_pezkuwi-sdk.html
'''

# `label` sub-command: lets contributors add PR labels themselves; validation
# and typo auto-correction happen in this script, the actual labeling in CI.
parser_label = subparsers.add_parser('label', help='Add labels to PR (self-service for contributors)', epilog=label_example, formatter_class=argparse.RawDescriptionHelpFormatter)
# Attach the flags shared by every sub-command.
for arg, config in common_args.items():
    parser_label.add_argument(arg, **config)

parser_label.add_argument('labels', nargs='+', help='Labels to add to the PR (auto-corrects typos)')
|
||||
|
||||
def main():
    """Entry point: parse the sub-command and dispatch to bench/fmt/update-ui/prdoc/label.

    Mutates the module-level `args`, `unknown` and `runtimesMatrix` globals so
    that helper code (and the unit tests) can observe the parsed state.
    Exits the process with a non-zero code on failure.
    """
    global args, unknown, runtimesMatrix
    args, unknown = parser.parse_known_args()

    print(f'args: {args}')

    if args.command == 'bench' or args.command == 'bench-omni':
        runtime_pallets_map = {}   # runtime name -> list of benchable pallets
        failed_benchmarks = {}     # runtime name -> pallets that failed
        successful_benchmarks = {} # runtime name -> pallets that succeeded

        profile = "production"

        print(f'Provided runtimes: {args.runtime}')
        # convert to mapped dict keyed by runtime name, keeping only requested runtimes
        runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix))
        runtimesMatrix = {x['name']: x for x in runtimesMatrix}
        print(f'Filtered out runtimes: {runtimesMatrix}')

        # Install the bencher binary once up front; every later step shells out to it.
        compile_bencher = os.system(f"cargo install -q --path substrate/utils/frame/omni-bencher --locked --profile {profile}")
        if compile_bencher != 0:
            print_and_log('❌ Failed to compile frame-omni-bencher')
            sys.exit(1)

        # loop over remaining runtimes to collect available pallets
        for runtime in runtimesMatrix.values():
            build_command = f"forklift cargo build -q -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}"
            print(f'-- building "{runtime["name"]}" with `{build_command}`')
            build_status = os.system(build_command)
            if build_status != 0:
                print_and_log(f'❌ Failed to build {runtime["name"]}')
                if args.fail_fast:
                    sys.exit(1)
                else:
                    continue

            print(f'-- listing pallets for benchmark for {runtime["name"]}')
            wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
            list_command = f"frame-omni-bencher v1 benchmark pallet " \
                f"--no-csv-header " \
                f"--no-storage-info " \
                f"--no-min-squares " \
                f"--no-median-slopes " \
                f"--all " \
                f"--list " \
                f"--runtime={wasm_file} " \
                f"{runtime['bench_flags']}"
            print(f'-- running: {list_command}')
            output = os.popen(list_command).read()
            raw_pallets = output.strip().split('\n')

            # `--list` emits CSV-ish lines; the pallet name is the first field.
            all_pallets = set()
            for pallet in raw_pallets:
                if pallet:
                    all_pallets.add(pallet.split(',')[0].strip())

            pallets = list(all_pallets)
            print(f'Pallets in {runtime["name"]}: {pallets}')
            runtime_pallets_map[runtime['name']] = pallets

        print(f'\n')

        # filter out only the specified pallets from collected runtimes/pallets
        if args.pallet:
            print(f'Pallets: {args.pallet}')
            new_pallets_map = {}
            # keep only specified pallets if they exist in the runtime
            for runtime in runtime_pallets_map:
                if set(args.pallet).issubset(set(runtime_pallets_map[runtime])):
                    new_pallets_map[runtime] = args.pallet

            runtime_pallets_map = new_pallets_map

        print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n')

        # Nothing left to benchmark: explain which filter emptied the map and bail.
        if not runtime_pallets_map:
            if args.pallet and not args.runtime:
                print(f"No pallets {args.pallet} found in any runtime")
            elif args.runtime and not args.pallet:
                print(f"{args.runtime} runtime does not have any pallets")
            elif args.runtime and args.pallet:
                print(f"No pallets {args.pallet} found in {args.runtime}")
            else:
                print('No runtimes found')
            sys.exit(1)

        for runtime in runtime_pallets_map:
            for pallet in runtime_pallets_map[runtime]:
                config = runtimesMatrix[runtime]
                header_path = os.path.abspath(config['header'])
                template = None

                print(f'-- config: {config}')
                if runtime == 'dev':
                    # The dev runtime writes weights next to each pallet's crate,
                    # so locate the crate via cargo metadata + jq.
                    # to support sub-modules (https://github.com/paritytech/command-bot/issues/275)
                    search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
                    print(f'-- running: {search_manifest_path}')
                    manifest_path = os.popen(search_manifest_path).read()
                    if not manifest_path:
                        print(f'-- pallet {pallet} not found in dev runtime')
                        if args.fail_fast:
                            print_and_log(f'Error: {pallet} not found in dev runtime')
                            sys.exit(1)
                        # NOTE(review): without --fail-fast execution falls through
                        # with an empty manifest_path; a `continue` here looks
                        # intended — confirm before changing behavior.
                    package_dir = os.path.dirname(manifest_path)
                    print(f'-- package_dir: {package_dir}')
                    print(f'-- manifest_path: {manifest_path}')
                    output_path = os.path.join(package_dir, "src", "weights.rs")
                    # TODO: we can remove once all pallets in dev runtime are migrated to polkadot-sdk-frame
                    try:
                        uses_polkadot_sdk_frame = "true" in os.popen(f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .dependencies | any(.name == \"polkadot-sdk-frame\")'").read()
                        print(f'uses_polkadot_sdk_frame: {uses_polkadot_sdk_frame}')
                    # Empty output from the previous os.popen command
                    except StopIteration:
                        print(f'Error: {pallet} not found in dev runtime')
                        uses_polkadot_sdk_frame = False
                    template = config['template']
                    # NOTE(review): `(:?` is likely a typo for the non-capturing
                    # group `(?:`; as written it still matches both template names
                    # (the group is optional), so behavior is unaffected — confirm.
                    if uses_polkadot_sdk_frame and re.match(r"frame-(:?umbrella-)?weight-template\.hbs", os.path.normpath(template).split(os.path.sep)[-1]):
                        template = "substrate/.maintain/frame-umbrella-weight-template.hbs"
                    print(f'template: {template}')
                else:
                    # Non-dev runtimes keep weights under the runtime directory;
                    # XCM benchmarks go to a dedicated sub-dir with their own template.
                    default_path = f"./{config['path']}/src/weights"
                    xcm_path = f"./{config['path']}/src/weights/xcm"
                    output_path = default_path
                    if pallet.startswith("pallet_xcm_benchmarks"):
                        template = config['template']
                        output_path = xcm_path

                print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
                cmd = f"frame-omni-bencher v1 benchmark pallet " \
                    f"--extrinsic=* " \
                    f"--runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm " \
                    f"--pallet={pallet} " \
                    f"--header={header_path} " \
                    f"--output={output_path} " \
                    f"--wasm-execution=compiled " \
                    f"--steps=50 " \
                    f"--repeat=20 " \
                    f"--heap-pages=4096 " \
                    f"{f'--template={template} ' if template else ''}" \
                    f"--no-storage-info --no-min-squares --no-median-slopes " \
                    f"{config['bench_flags']}"
                print(f'-- Running: {cmd} \n')
                status = os.system(cmd)

                if status != 0 and args.fail_fast:
                    print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}')
                    sys.exit(1)

                # Otherwise collect failed benchmarks and print them at the end
                # push failed pallets to failed_benchmarks
                if status != 0:
                    failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
                else:
                    successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]

        if failed_benchmarks:
            print_and_log('❌ Failed benchmarks of runtimes/pallets:')
            for runtime, pallets in failed_benchmarks.items():
                print_and_log(f'-- {runtime}: {pallets}')

        if successful_benchmarks:
            print_and_log('✅ Successful benchmarks of runtimes/pallets:')
            for runtime, pallets in successful_benchmarks.items():
                print_and_log(f'-- {runtime}: {pallets}')

    elif args.command == 'fmt':
        command = f"cargo +nightly fmt"
        print(f'Formatting with `{command}`')
        nightly_status = os.system(f'{command}')
        taplo_status = os.system('taplo format --config .config/taplo.toml')

        if (nightly_status != 0 or taplo_status != 0):
            print_and_log('❌ Failed to format code')
            sys.exit(1)

    elif args.command == 'update-ui':
        command = 'sh ./scripts/update-ui-tests.sh'
        print(f'Updating ui with `{command}`')
        status = os.system(f'{command}')

        if status != 0:
            print_and_log('❌ Failed to update ui')
            sys.exit(1)

    elif args.command == 'prdoc':
        # Call the main function from ./github/scripts/generate-prdoc.py module
        exit_code = generate_prdoc.main(args)
        if exit_code != 0:
            print_and_log('❌ Failed to generate prdoc')
            sys.exit(exit_code)

    elif args.command == 'label':
        # The actual labeling is handled by the GitHub Action workflow
        # This script validates and auto-corrects labels

        try:
            # Check if PR is still open and not merged/in merge queue
            pr_number = os.environ.get('PR_NUM')
            if pr_number:
                if not check_pr_status(pr_number):
                    raise ValueError("Cannot modify labels on merged PRs or PRs in merge queue")

            # Check if user has permission to modify labels
            is_org_member = os.environ.get('IS_ORG_MEMBER', 'false').lower() == 'true'
            is_pr_author = os.environ.get('IS_PR_AUTHOR', 'false').lower() == 'true'

            if not is_org_member and not is_pr_author:
                raise ValueError("Only the PR author or organization members can modify labels")

            # Get allowed labels dynamically
            try:
                allowed_labels = get_allowed_labels()
            except RuntimeError as e:
                raise ValueError(str(e))

            # Validate and auto-correct labels
            final_labels, correction_messages = validate_and_auto_correct_labels(args.labels, allowed_labels)

            # Show auto-correction messages
            for message in correction_messages:
                print(message)

            # Output labels as JSON for GitHub Action
            import json
            labels_output = {"labels": final_labels}
            print(f"LABELS_JSON: {json.dumps(labels_output)}")
        except ValueError as e:
            print_and_log(f'❌ {e}')

            # Output error as JSON for GitHub Action
            import json
            error_output = {
                "error": "validation_failed",
                "message": "Invalid labels found. Please check the suggestions below and try again.",
                "details": str(e)
            }
            print(f"ERROR_JSON: {json.dumps(error_output)}")
            sys.exit(1)

    print('🚀 Done')
|
||||
|
||||
# Script entry point; kept behind the guard so the module can be imported by tests.
if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,773 @@
|
||||
import unittest
|
||||
from unittest.mock import patch, mock_open, MagicMock, call
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
import argparse
|
||||
|
||||
# Mock data for runtimes-matrix.json
# Shape mirrors the real config: one entry per runtime with the cargo package,
# weights path, license header, hbs template and extra bencher flags.
mock_runtimes_matrix = [
    {
        "name": "dev",
        "package": "kitchensink-runtime",
        "path": "substrate/frame",
        "header": "substrate/HEADER-APACHE2",
        "template": "substrate/.maintain/frame-weight-template.hbs",
        "bench_features": "runtime-benchmarks",
        "bench_flags": "--flag1 --flag2"
    },
    {
        "name": "zagros",
        "package": "zagros-runtime",
        "path": "pezkuwi/runtime/zagros",
        "header": "pezkuwi/file_header.txt",
        "template": "pezkuwi/xcm/pallet-xcm-benchmarks/template.hbs",
        "bench_features": "runtime-benchmarks",
        "bench_flags": "--flag3 --flag4"
    },
    {
        "name": "pezkuwichain",
        "package": "pezkuwichain-runtime",
        "path": "pezkuwi/runtime/pezkuwichain",
        "header": "pezkuwi/file_header.txt",
        "template": "pezkuwi/xcm/pallet-xcm-benchmarks/template.hbs",
        "bench_features": "runtime-benchmarks",
        "bench_flags": ""
    },
    {
        "name": "asset-hub-zagros",
        "package": "asset-hub-zagros-runtime",
        "path": "cumulus/teyrchains/runtimes/assets/asset-hub-zagros",
        "header": "cumulus/file_header.txt",
        "template": "cumulus/templates/xcm-bench-template.hbs",
        "bench_features": "runtime-benchmarks",
        "bench_flags": "--flag7 --flag8"
    }
]
|
||||
|
||||
def get_mock_bench_output(runtime, pallets, output_path, header, bench_flags, template=None):
    """Build the exact frame-omni-bencher command line cmd.py is expected to run.

    Mirrors the command construction in cmd.py (production profile) so the
    tests can assert on `os.system` calls verbatim.
    """
    wasm = f"target/production/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm"
    segments = [
        "frame-omni-bencher v1 benchmark pallet --extrinsic=*",
        f"--runtime={wasm}",
        f"--pallet={pallets} --header={header}",
        f"--output={output_path}",
        "--wasm-execution=compiled",
        "--steps=50 --repeat=20 --heap-pages=4096",
    ]
    if template:
        segments.append(f"--template={template}")
    segments.append("--no-storage-info --no-min-squares --no-median-slopes")
    # bench_flags goes last; an empty value leaves the same trailing space the
    # original string concatenation produced.
    segments.append(bench_flags)
    return " ".join(segments)
|
||||
|
||||
class TestCmd(unittest.TestCase):
|
||||
|
||||
    def setUp(self):
        """Install every mock cmd.py touches: file/JSON IO, argparse, os.system/os.popen,
        and the dynamic generate-prdoc import. Individual tests override return
        values / side effects on the saved mock handles."""
        self.patcher1 = patch('builtins.open', new_callable=mock_open, read_data=json.dumps(mock_runtimes_matrix))
        self.patcher2 = patch('json.load', return_value=mock_runtimes_matrix)
        self.patcher3 = patch('argparse.ArgumentParser.parse_known_args')
        self.patcher4 = patch('os.system', return_value=0)  # every shell command "succeeds" by default
        self.patcher5 = patch('os.popen')
        self.patcher6 = patch('importlib.util.spec_from_file_location', return_value=MagicMock())
        self.patcher7 = patch('importlib.util.module_from_spec', return_value=MagicMock())
        self.patcher8 = patch('cmd.generate_prdoc.main', return_value=0)

        self.mock_open = self.patcher1.start()
        self.mock_json_load = self.patcher2.start()
        self.mock_parse_args = self.patcher3.start()
        self.mock_system = self.patcher4.start()
        self.mock_popen = self.patcher5.start()
        self.mock_spec_from_file_location = self.patcher6.start()
        self.mock_module_from_spec = self.patcher7.start()
        self.mock_generate_prdoc_main = self.patcher8.start()

        # Ensure that cmd.py uses the mock_runtimes_matrix
        import cmd
        cmd.runtimesMatrix = mock_runtimes_matrix
|
||||
|
||||
def tearDown(self):
|
||||
self.patcher1.stop()
|
||||
self.patcher2.stop()
|
||||
self.patcher3.stop()
|
||||
self.patcher4.stop()
|
||||
self.patcher5.stop()
|
||||
self.patcher6.stop()
|
||||
self.patcher7.stop()
|
||||
self.patcher8.stop()
|
||||
|
||||
    def test_bench_command_normal_execution_all_runtimes(self):
        """bench-omni across all runtimes: builds every runtime and benches
        pallet_balances only where the runtime reports it (pezkuwichain is skipped)."""
        self.mock_parse_args.return_value = (argparse.Namespace(
            command='bench-omni',
            runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)),
            pallet=['pallet_balances'],
            fail_fast=True,
            quiet=False,
            clean=False,
            image=None
        ), [])

        # One os.popen read per runtime's `--list`, plus the dev manifest lookup.
        self.mock_popen.return_value.read.side_effect = [
            "pallet_balances\npallet_staking\npallet_something\n",  # Output for dev runtime
            "pallet_balances\npallet_staking\npallet_something\n",  # Output for zagros runtime
            "pallet_staking\npallet_something\n",  # Output for pezkuwichain runtime - no pallet here
            "pallet_balances\npallet_staking\npallet_something\n",  # Output for asset-hub-zagros runtime
            "./substrate/frame/balances/Cargo.toml\n",  # Mock manifest path for dev -> pallet_balances
        ]

        with patch('sys.exit') as mock_exit:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()

        expected_calls = [
            # Build calls
            call("forklift cargo build -q -p kitchensink-runtime --profile production --features=runtime-benchmarks"),
            call("forklift cargo build -q -p zagros-runtime --profile production --features=runtime-benchmarks"),
            call("forklift cargo build -q -p pezkuwichain-runtime --profile production --features=runtime-benchmarks"),
            call("forklift cargo build -q -p asset-hub-zagros-runtime --profile production --features=runtime-benchmarks"),

            call(get_mock_bench_output(
                runtime='kitchensink',
                pallets='pallet_balances',
                output_path='./substrate/frame/balances/src/weights.rs',
                header=os.path.abspath('substrate/HEADER-APACHE2'),
                bench_flags='--flag1 --flag2',
                template="substrate/.maintain/frame-weight-template.hbs"
            )),
            call(get_mock_bench_output(
                runtime='zagros',
                pallets='pallet_balances',
                output_path='./pezkuwi/runtime/zagros/src/weights',
                header=os.path.abspath('pezkuwi/file_header.txt'),
                bench_flags='--flag3 --flag4'
            )),
            # skips pezkuwichain benchmark
            call(get_mock_bench_output(
                runtime='asset-hub-zagros',
                pallets='pallet_balances',
                output_path='./cumulus/teyrchains/runtimes/assets/asset-hub-zagros/src/weights',
                header=os.path.abspath('cumulus/file_header.txt'),
                bench_flags='--flag7 --flag8'
            )),
        ]
        self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
    def test_bench_command_normal_execution(self):
        """bench-omni for a single runtime with two pallets benches each pallet once."""
        self.mock_parse_args.return_value = (argparse.Namespace(
            command='bench-omni',
            runtime=['zagros'],
            pallet=['pallet_balances', 'pallet_staking'],
            fail_fast=True,
            quiet=False,
            clean=False,
            image=None
        ), [])
        header_path = os.path.abspath('pezkuwi/file_header.txt')
        self.mock_popen.return_value.read.side_effect = [
            "pallet_balances\npallet_staking\npallet_something\n",  # Output for zagros runtime
        ]

        with patch('sys.exit') as mock_exit:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()

        expected_calls = [
            # Build calls
            call("forklift cargo build -q -p zagros-runtime --profile production --features=runtime-benchmarks"),

            # Zagros runtime calls
            call(get_mock_bench_output(
                runtime='zagros',
                pallets='pallet_balances',
                output_path='./pezkuwi/runtime/zagros/src/weights',
                header=header_path,
                bench_flags='--flag3 --flag4'
            )),
            call(get_mock_bench_output(
                runtime='zagros',
                pallets='pallet_staking',
                output_path='./pezkuwi/runtime/zagros/src/weights',
                header=header_path,
                bench_flags='--flag3 --flag4'
            )),
        ]
        self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
|
||||
    def test_bench_command_normal_execution_xcm(self):
        """XCM benchmark pallets go to the weights/xcm sub-dir with the runtime's template."""
        self.mock_parse_args.return_value = (argparse.Namespace(
            command='bench-omni',
            runtime=['zagros'],
            pallet=['pallet_xcm_benchmarks::generic'],
            fail_fast=True,
            quiet=False,
            clean=False,
            image=None
        ), [])
        header_path = os.path.abspath('pezkuwi/file_header.txt')
        self.mock_popen.return_value.read.side_effect = [
            "pallet_balances\npallet_staking\npallet_something\npallet_xcm_benchmarks::generic\n",  # Output for zagros runtime
        ]

        with patch('sys.exit') as mock_exit:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()

        expected_calls = [
            # Build calls
            call("forklift cargo build -q -p zagros-runtime --profile production --features=runtime-benchmarks"),

            # Zagros runtime calls
            call(get_mock_bench_output(
                runtime='zagros',
                pallets='pallet_xcm_benchmarks::generic',
                output_path='./pezkuwi/runtime/zagros/src/weights/xcm',
                header=header_path,
                bench_flags='--flag3 --flag4',
                template="pezkuwi/xcm/pallet-xcm-benchmarks/template.hbs"
            )),
        ]
        self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
    def test_bench_command_two_runtimes_two_pallets(self):
        """Two runtimes x two pallets produces four benchmark invocations (cartesian product)."""
        self.mock_parse_args.return_value = (argparse.Namespace(
            command='bench-omni',
            runtime=['zagros', 'pezkuwichain'],
            pallet=['pallet_balances', 'pallet_staking'],
            fail_fast=True,
            quiet=False,
            clean=False,
            image=None
        ), [])
        self.mock_popen.return_value.read.side_effect = [
            "pallet_staking\npallet_balances\n",  # Output for zagros runtime
            "pallet_staking\npallet_balances\n",  # Output for pezkuwichain runtime
        ]

        with patch('sys.exit') as mock_exit:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()
        header_path = os.path.abspath('pezkuwi/file_header.txt')

        expected_calls = [
            # Build calls
            call("forklift cargo build -q -p zagros-runtime --profile production --features=runtime-benchmarks"),
            call("forklift cargo build -q -p pezkuwichain-runtime --profile production --features=runtime-benchmarks"),
            # Zagros runtime calls
            call(get_mock_bench_output(
                runtime='zagros',
                pallets='pallet_staking',
                output_path='./pezkuwi/runtime/zagros/src/weights',
                header=header_path,
                bench_flags='--flag3 --flag4'
            )),
            call(get_mock_bench_output(
                runtime='zagros',
                pallets='pallet_balances',
                output_path='./pezkuwi/runtime/zagros/src/weights',
                header=header_path,
                bench_flags='--flag3 --flag4'
            )),
            # Pezkuwichain runtime calls
            call(get_mock_bench_output(
                runtime='pezkuwichain',
                pallets='pallet_staking',
                output_path='./pezkuwi/runtime/pezkuwichain/src/weights',
                header=header_path,
                bench_flags=''
            )),
            call(get_mock_bench_output(
                runtime='pezkuwichain',
                pallets='pallet_balances',
                output_path='./pezkuwi/runtime/pezkuwichain/src/weights',
                header=header_path,
                bench_flags=''
            )),
        ]
        self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
    def test_bench_command_one_dev_runtime(self):
        """The dev runtime resolves the pallet's crate via cargo metadata and
        writes weights.rs next to the pallet source."""
        self.mock_parse_args.return_value = (argparse.Namespace(
            command='bench-omni',
            runtime=['dev'],
            pallet=['pallet_balances'],
            fail_fast=True,
            quiet=False,
            clean=False,
            image=None
        ), [])
        manifest_dir = "substrate/frame/kitchensink"
        self.mock_popen.return_value.read.side_effect = [
            "pallet_balances\npallet_something",  # Output for dev runtime
            manifest_dir + "/Cargo.toml"  # Output for manifest path in dev runtime
        ]
        header_path = os.path.abspath('substrate/HEADER-APACHE2')

        with patch('sys.exit') as mock_exit:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()

        expected_calls = [
            # Build calls
            call("forklift cargo build -q -p kitchensink-runtime --profile production --features=runtime-benchmarks"),
            # Westend runtime calls
            call(get_mock_bench_output(
                runtime='kitchensink',
                pallets='pallet_balances',
                output_path=manifest_dir + "/src/weights.rs",
                header=header_path,
                bench_flags='--flag1 --flag2',
                template="substrate/.maintain/frame-weight-template.hbs"
            )),
        ]
        self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
    def test_bench_command_one_cumulus_runtime(self):
        """A cumulus (teyrchain) runtime benches into its runtime-local weights dir."""
        self.mock_parse_args.return_value = (argparse.Namespace(
            command='bench-omni',
            runtime=['asset-hub-zagros'],
            pallet=['pallet_assets'],
            fail_fast=True,
            quiet=False,
            clean=False,
            image=None
        ), [])
        self.mock_popen.return_value.read.side_effect = [
            "pallet_assets\n",  # Output for asset-hub-zagros runtime
        ]
        header_path = os.path.abspath('cumulus/file_header.txt')

        with patch('sys.exit') as mock_exit:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()

        expected_calls = [
            # Build calls
            call("forklift cargo build -q -p asset-hub-zagros-runtime --profile production --features=runtime-benchmarks"),
            # Asset-hub-zagros runtime calls
            call(get_mock_bench_output(
                runtime='asset-hub-zagros',
                pallets='pallet_assets',
                output_path='./cumulus/teyrchains/runtimes/assets/asset-hub-zagros/src/weights',
                header=header_path,
                bench_flags='--flag7 --flag8'
            )),
        ]

        self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
    def test_bench_command_one_cumulus_runtime_xcm(self):
        """Mixed pallets on a cumulus runtime: xcm pallet gets the xcm path + template,
        the regular pallet gets the default weights dir."""
        self.mock_parse_args.return_value = (argparse.Namespace(
            command='bench-omni',
            runtime=['asset-hub-zagros'],
            pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'],
            fail_fast=True,
            quiet=False,
            clean=False,
            image=None
        ), [])
        self.mock_popen.return_value.read.side_effect = [
            "pallet_assets\npallet_xcm_benchmarks::generic\n",  # Output for asset-hub-zagros runtime
        ]
        header_path = os.path.abspath('cumulus/file_header.txt')

        with patch('sys.exit') as mock_exit:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()

        expected_calls = [
            # Build calls
            call("forklift cargo build -q -p asset-hub-zagros-runtime --profile production --features=runtime-benchmarks"),
            # Asset-hub-zagros runtime calls
            call(get_mock_bench_output(
                runtime='asset-hub-zagros',
                pallets='pallet_xcm_benchmarks::generic',
                output_path='./cumulus/teyrchains/runtimes/assets/asset-hub-zagros/src/weights/xcm',
                header=header_path,
                bench_flags='--flag7 --flag8',
                template="cumulus/templates/xcm-bench-template.hbs"
            )),
            call(get_mock_bench_output(
                runtime='asset-hub-zagros',
                pallets='pallet_assets',
                output_path='./cumulus/teyrchains/runtimes/assets/asset-hub-zagros/src/weights',
                header=header_path,
                bench_flags='--flag7 --flag8'
            )),
        ]

        self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
    @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt'), []))
    @patch('os.system', return_value=0)
    def test_fmt_command(self, mock_system, mock_parse_args):
        """fmt runs both cargo fmt and taplo and exits cleanly when both succeed."""
        with patch('sys.exit') as mock_exit:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()
        mock_system.assert_any_call('cargo +nightly fmt')
        mock_system.assert_any_call('taplo format --config .config/taplo.toml')
|
||||
|
||||
    @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui'), []))
    @patch('os.system', return_value=0)
    def test_update_ui_command(self, mock_system, mock_parse_args):
        """update-ui shells out to the update script and exits cleanly on success."""
        with patch('sys.exit') as mock_exit:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()
        mock_system.assert_called_with('sh ./scripts/update-ui-tests.sh')
|
||||
|
||||
    @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='prdoc'), []))
    @patch('os.system', return_value=0)
    def test_prdoc_command(self, mock_system, mock_parse_args):
        """prdoc delegates to generate_prdoc.main with the parsed namespace."""
        with patch('sys.exit') as mock_exit:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()
        # setUp patched cmd.generate_prdoc.main; it must receive the parsed args.
        self.mock_generate_prdoc_main.assert_called_with(mock_parse_args.return_value[0])
|
||||
|
||||
    @patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
    @patch('cmd.get_allowed_labels')
    @patch('cmd.check_pr_status')
    @patch('argparse.ArgumentParser.parse_known_args')
    def test_label_command_valid_labels(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
        """Test label command with valid labels"""
        mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required', 'D2-substantial']
        mock_check_pr_status.return_value = True  # PR is open
        mock_parse_args.return_value = (argparse.Namespace(
            command='label',
            labels=['T1-FRAME', 'R0-no-crate-publish-required']
        ), [])

        with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()

            # Check that JSON output was printed
            json_call = None
            # NOTE: the loop variable shadows unittest.mock.call imported above;
            # harmless here since mock.call is not used in this scope.
            for call in mock_print.call_args_list:
                if 'LABELS_JSON:' in str(call):
                    json_call = call
                    break

            self.assertIsNotNone(json_call)
            self.assertIn('T1-FRAME', str(json_call))
            self.assertIn('R0-no-crate-publish-required', str(json_call))
|
||||
|
||||
    @patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
    @patch('cmd.get_allowed_labels')
    @patch('cmd.check_pr_status')
    @patch('argparse.ArgumentParser.parse_known_args')
    def test_label_command_auto_correction(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
        """Test label command with auto-correctable typos"""
        mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required', 'D2-substantial']
        mock_check_pr_status.return_value = True  # PR is open
        mock_parse_args.return_value = (argparse.Namespace(
            command='label',
            labels=['T1-FRAM', 'R0-no-crate-publish']  # Typos that should be auto-corrected
        ), [])

        with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()

            # Check for auto-correction messages
            correction_messages = [str(call) for call in mock_print.call_args_list if 'Auto-corrected' in str(call)]
            self.assertTrue(len(correction_messages) > 0)

            # Check that JSON output contains corrected labels
            json_call = None
            # NOTE: the loop variable shadows unittest.mock.call imported above;
            # harmless here since mock.call is not used in this scope.
            for call in mock_print.call_args_list:
                if 'LABELS_JSON:' in str(call):
                    json_call = call
                    break

            self.assertIsNotNone(json_call)
            self.assertIn('T1-FRAME', str(json_call))
            self.assertIn('R0-no-crate-publish-required', str(json_call))
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_prefix_correction(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command with prefix matching"""
    # Note two T-prefixed labels exist, but only one starts with "T1-".
    mock_get_labels.return_value = ['T1-FRAME', 'T2-pallets', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-something']  # Should match T1-FRAME as the only T1- label
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

        # Check that JSON output contains corrected label
        json_call = None
        for call in mock_print.call_args_list:
            if 'LABELS_JSON:' in str(call):
                json_call = call
                break

        self.assertIsNotNone(json_call)
        self.assertIn('T1-FRAME', str(json_call))
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_invalid_labels(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command with invalid labels that cannot be corrected"""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required', 'D2-substantial']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['INVALID-LABEL', 'ANOTHER-BAD-LABEL']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_called_with(1)  # Should exit with error code

        # Check for error JSON output
        # (failures are reported on a line prefixed "ERROR_JSON:").
        error_json_call = None
        for call in mock_print.call_args_list:
            if 'ERROR_JSON:' in str(call):
                error_json_call = call
                break

        self.assertIsNotNone(error_json_call)
        self.assertIn('validation_failed', str(error_json_call))
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_mixed_valid_invalid(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command with mix of valid and invalid labels"""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required', 'D2-substantial']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME', 'INVALID-LABEL', 'D2-substantial']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        # A single invalid label fails the whole command, even alongside valid ones.
        mock_exit.assert_called_with(1)  # Should exit with error code due to invalid label

        # Check for error JSON output
        error_json_call = None
        for call in mock_print.call_args_list:
            if 'ERROR_JSON:' in str(call):
                error_json_call = call
                break

        self.assertIsNotNone(error_json_call)
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_fetch_failure(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command when label fetching fails"""
    # Simulate a network/API failure while retrieving the allowed-label list.
    mock_get_labels.side_effect = RuntimeError("Failed to fetch labels from repository. Please check your connection and try again.")
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_called_with(1)  # Should exit with error code

        # Check for error JSON output
        error_json_call = None
        for call in mock_print.call_args_list:
            if 'ERROR_JSON:' in str(call):
                error_json_call = call
                break

        self.assertIsNotNone(error_json_call)
        self.assertIn('Failed to fetch labels from repository', str(error_json_call))
|
||||
|
||||
def test_auto_correct_labels_function(self):
    """Test the auto_correct_labels function directly"""
    import cmd

    valid_labels = ['T1-FRAME', 'R0-no-crate-publish-required', 'D2-substantial', 'I2-bug']

    # Test high similarity auto-correction
    # corrections is a list of (given, corrected) pairs.
    corrections, suggestions = cmd.auto_correct_labels(['T1-FRAM'], valid_labels)
    self.assertEqual(len(corrections), 1)
    self.assertEqual(corrections[0][0], 'T1-FRAM')
    self.assertEqual(corrections[0][1], 'T1-FRAME')

    # Test low similarity suggestions
    # An unrecognizable label yields no correction, only a suggestion entry.
    corrections, suggestions = cmd.auto_correct_labels(['TOTALLY-WRONG'], valid_labels)
    self.assertEqual(len(corrections), 0)
    self.assertEqual(len(suggestions), 1)
|
||||
|
||||
def test_find_closest_labels_function(self):
    """Test the find_closest_labels function directly"""
    import cmd

    valid_labels = ['T1-FRAME', 'T2-pallets', 'R0-no-crate-publish-required']

    # Test finding close matches
    matches = cmd.find_closest_labels('T1-FRAM', valid_labels)
    self.assertIn('T1-FRAME', matches)

    # Test no close matches
    # A high cutoff should exclude everything for a dissimilar input.
    matches = cmd.find_closest_labels('COMPLETELY-DIFFERENT', valid_labels, cutoff=0.8)
    self.assertEqual(len(matches), 0)
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_merged_pr(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command on merged PR should fail"""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = False  # PR is merged/closed
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_called_with(1)

        # Check for error JSON output
        error_json_call = None
        for call in mock_print.call_args_list:
            if 'ERROR_JSON:' in str(call):
                error_json_call = call
                break

        self.assertIsNotNone(error_json_call)
        self.assertIn('Cannot modify labels on merged PRs', str(error_json_call))
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_open_pr(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command on open PR should succeed"""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

        # Check that JSON output was printed
        json_call = None
        for call in mock_print.call_args_list:
            if 'LABELS_JSON:' in str(call):
                json_call = call
                break

        self.assertIsNotNone(json_call)
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'false', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_unauthorized_user(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command by unauthorized user should fail"""
    # Neither org member nor PR author (see the patched environment above).
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_called_with(1)

        # Check for error JSON output
        error_json_call = None
        for call in mock_print.call_args_list:
            if 'ERROR_JSON:' in str(call):
                error_json_call = call
                break

        self.assertIsNotNone(error_json_call)
        self.assertIn('Only the PR author or organization members can modify labels', str(error_json_call))
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'false', 'IS_PR_AUTHOR': 'true', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_pr_author(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command by PR author should succeed"""
    # IS_PR_AUTHOR=true is sufficient authorization even without org membership.
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

        # Check that JSON output was printed
        json_call = None
        for call in mock_print.call_args_list:
            if 'LABELS_JSON:' in str(call):
                json_call = call
                break

        self.assertIsNotNone(json_call)
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_org_member(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command by org member should succeed"""
    # IS_ORG_MEMBER=true is sufficient authorization even for non-authors.
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

        # Check that JSON output was printed
        json_call = None
        for call in mock_print.call_args_list:
            if 'LABELS_JSON:' in str(call):
                json_call = call
                break

        self.assertIsNotNone(json_call)
|
||||
|
||||
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
|
||||
Executable
+588
@@ -0,0 +1,588 @@
|
||||
#!/bin/sh
|
||||
|
||||
api_base="https://api.github.com/repos"
|
||||
|
||||
# Function to take 2 git tags/commits and get any lines from commit messages
|
||||
# that contain something that looks like a PR reference: e.g., (#1234)
|
||||
sanitised_git_logs(){
  # Subjects only, between the two given revisions (exclusive range).
  git --no-pager log --pretty=format:"%s" "$1...$2" |
  # Only find messages referencing a PR
  grep -E '\(#[0-9]+\)' |
  # Strip any asterisks (a leading "* " bullet, if present)
  sed 's/^* //g'
}
|
||||
|
||||
# Checks whether a tag on github has been verified
|
||||
# repo: 'organization/repo'
|
||||
# tagver: 'v1.2.3'
|
||||
# Usage: check_tag $repo $tagver
|
||||
# Returns: 0 = tag exists and its signature is verified,
#          1 = tag exists but is not verified,
#          2 = tag not found.
check_tag () {
  repo=$1
  tagver=$2
  # Select the token once: prefer the privileged release token, otherwise
  # fall back to the PR token. Both requests below must use the same token.
  if [ -n "$GITHUB_RELEASE_TOKEN" ]; then
    echo '[+] Fetching tag using privileged token'
    tag_token="$GITHUB_RELEASE_TOKEN"
  else
    echo '[+] Fetching tag using unprivileged token'
    tag_token="$GITHUB_PR_TOKEN"
  fi
  tag_out=$(curl -H "Authorization: token $tag_token" -s "$api_base/$repo/git/refs/tags/$tagver")
  tag_sha=$(echo "$tag_out" | jq -r .object.sha)
  object_url=$(echo "$tag_out" | jq -r .object.url)
  if [ "$tag_sha" = "null" ]; then
    return 2
  fi
  echo "[+] Tag object SHA: $tag_sha"
  # Bug fix: this request previously hard-coded GITHUB_RELEASE_TOKEN, which is
  # empty on the unprivileged path; reuse the token selected above instead.
  verified_str=$(curl -H "Authorization: token $tag_token" -s "$object_url" | jq -r .verification.verified)
  if [ "$verified_str" = "true" ]; then
    # Verified, everything is good
    return 0
  else
    # Not verified. Bad juju.
    return 1
  fi
}
|
||||
|
||||
# Checks whether a given PR has a given label.
|
||||
# repo: 'organization/repo'
|
||||
# pr_id: 12345
|
||||
# label: B1-silent
|
||||
# Usage: has_label $repo $pr_id $label
|
||||
# Exit status: 0 iff the PR currently carries the given label.
has_label(){
  repo="$1"
  pr_id="$2"
  label="$3"

  # These will exist if the function is called in Gitlab.
  # If the function's called in Github, we should have GITHUB_ACCESS_TOKEN set
  # already.
  if [ -n "$GITHUB_RELEASE_TOKEN" ]; then
    GITHUB_TOKEN="$GITHUB_RELEASE_TOKEN"
  elif [ -n "$GITHUB_PR_TOKEN" ]; then
    GITHUB_TOKEN="$GITHUB_PR_TOKEN"
  fi

  out=$(curl -H "Authorization: token $GITHUB_TOKEN" -s "$api_base/$repo/pulls/$pr_id")
  # Non-empty jq output means the label was found; the test's status is the
  # function's return value.
  [ -n "$(echo "$out" | tr -d '\r\n' | jq ".labels | .[] | select(.name==\"$label\")")" ]
}
|
||||
|
||||
# Trigger a Gitlab pipeline that applies label $1 to the current PR.
# Relies on CI vars: CI_JOB_TOKEN, CI_COMMIT_REF_NAME, GITLAB_API, GITHUB_API_PROJECT.
github_label () {
  echo
  echo "# run github-api job for labeling it ${1}"
  curl -sS -X POST \
    -F "token=${CI_JOB_TOKEN}" \
    -F "ref=master" \
    -F "variables[LABEL]=${1}" \
    -F "variables[PRNO]=${CI_COMMIT_REF_NAME}" \
    -F "variables[PROJECT]=pezkuwichain/pezkuwi" \
    "${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline"
}
|
||||
|
||||
# Formats a message into a JSON string for posting to Matrix
|
||||
# message: 'any plaintext message'
|
||||
# formatted_message: '<strong>optional message formatted in <em>html</em></strong>'
|
||||
# Usage: structure_message $content $formatted_content (optional)
|
||||
structure_message() {
  if [ -z "$2" ]; then
    # jq's `{$body}` shorthand expands to `"body": <value of --arg body>`.
    body=$(jq -Rs --arg body "$1" '{"msgtype": "m.text", $body}' < /dev/null)
  else
    body=$(jq -Rs --arg body "$1" --arg formatted_body "$2" '{"msgtype": "m.text", $body, "format": "org.matrix.custom.html", $formatted_body}' < /dev/null)
  fi
  echo "$body"
}
|
||||
|
||||
# Post a message to a matrix room
|
||||
# body: '{body: "JSON string produced by structure_message"}'
|
||||
# room_id: !fsfSRjgjBWEWffws:matrix.parity.io
|
||||
# access_token: see https://matrix.org/docs/guides/client-server-api/
|
||||
# Usage: send_message $body (json formatted) $room_id $access_token
|
||||
send_message() {
  # POST the pre-built JSON body ($1) to room $2 using access token $3.
  curl -XPOST -d "$1" "https://m.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3"
}
|
||||
|
||||
# Pretty-printing functions
|
||||
# Print each argument in bold, framed by "|" separator lines.
boldprint () { printf "|\n| \033[1m%s\033[0m\n|\n" "${@}"; }
|
||||
# Echo stdin line by line in bold, framed by "|" separator lines.
boldcat () { printf "|\n"; while read -r l; do printf "| \033[1m%s\033[0m\n" "${l}"; done; printf "|\n" ; }
|
||||
|
||||
# Exit the whole script successfully if the current PR's title marks it as a
# companion PR (the build already happened upstream); otherwise continue.
skip_if_companion_pr() {
  url="https://api.github.com/repos/pezkuwichain/pezkuwi/pulls/${CI_COMMIT_REF_NAME}"
  echo "[+] API URL: $url"

  pr_title=$(curl -sSL -H "Authorization: token ${GITHUB_PR_TOKEN}" "$url" | jq -r .title)
  echo "[+] PR title: $pr_title"

  # Case-insensitive match on a title starting with "companion".
  if echo "$pr_title" | grep -qi '^companion'; then
    echo "[!] PR is a companion PR. Build is already done in substrate"
    exit 0
  else
    echo "[+] PR is not a companion PR. Proceeding test"
  fi
}
|
||||
|
||||
# Fetches the tag name of the latest release from a repository
|
||||
# repo: 'organisation/repo'
|
||||
# Usage: latest_release 'pezkuwichain/pezkuwi'
|
||||
latest_release() {
  # Prints the tag name of the repository's latest GitHub release.
  curl -s "$api_base/$1/releases/latest" | jq -r '.tag_name'
}
|
||||
|
||||
# Check for runtime changes between two commits. This is defined as any changes
|
||||
# to /primitives/src/* and any *production* chains under /runtime
|
||||
# Exit status: 0 if any runtime-relevant path changed between the two revisions.
has_runtime_changes() {
  from=$1
  to=$2

  # Any touched file under the production runtimes or the primitives crate
  # counts as a runtime change.
  if git diff --name-only "${from}...${to}" \
    | grep -q -e '^runtime/pezkuwi' -e '^runtime/kusama' -e '^primitives/src/' -e '^runtime/common'
  then
    return 0
  else
    return 1
  fi
}
|
||||
|
||||
# given a bootnode and the path to a chainspec file, this function will create a new chainspec file
|
||||
# with only the bootnode specified and test whether that bootnode provides peers
|
||||
# The optional third argument is the index of the bootnode in the list of bootnodes, this is just used to pick an ephemeral
|
||||
# port for the node to run on. If you're only testing one, it'll just use the first ephemeral port
|
||||
# BOOTNODE: /dns/pezkuwi-connect-0.parity.io/tcp/443/wss/p2p/12D3KooWEPmjoRpDSUuiTjvyNDd8fejZ9eNWH5bE965nyBMDrB4o
|
||||
# CHAINSPEC_FILE: /path/to/pezkuwi.json
|
||||
# Launch a local node against a one-bootnode chainspec and poll its RPC
# health endpoint until at least MIN_PEERS peers appear (success, return 0)
# or MAX_POLLS attempts are exhausted (failure, return 1).
check_bootnode(){
  BOOTNODE=$1
  BASE_CHAINSPEC=$2
  RUNTIME=$(basename "$BASE_CHAINSPEC" | cut -d '.' -f 1)
  MIN_PEERS=1

  # Generate a temporary chainspec file containing only the bootnode we care about
  TMP_CHAINSPEC_FILE="$RUNTIME.$(echo "$BOOTNODE" | tr '/' '_').tmp.json"
  # Bug fix: read from the chainspec passed in as $2 (BASE_CHAINSPEC);
  # the previous code read $CHAINSPEC_FILE, which is never set here.
  jq ".bootNodes = [\"$BOOTNODE\"] " < "$BASE_CHAINSPEC" > "$TMP_CHAINSPEC_FILE"

  # Grab an unused port by binding to port 0 and then immediately closing the socket
  # This is a bit of a hack, but it's the only way to do it in the shell
  RPC_PORT=$(python -c "import socket; s=socket.socket(); s.bind(('', 0)); print(s.getsockname()[1]); s.close()")

  echo "[+] Checking bootnode $BOOTNODE"
  pezkuwi --chain "$TMP_CHAINSPEC_FILE" --no-mdns --rpc-port="$RPC_PORT" --tmp > /dev/null 2>&1 &
  # Capture the node's PID immediately after backgrounding it, before anything
  # else can run ($! refers to the most recent background job).
  PEZKUWI_PID=$!
  # Wait a few seconds for the node to start up
  sleep 5

  MAX_POLLS=10
  TIME_BETWEEN_POLLS=3
  for _ in $(seq 1 "$MAX_POLLS"); do
    # Check the health endpoint of the RPC node
    PEERS="$(curl -s -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"system_health","params":[],"id":1}' http://localhost:"$RPC_PORT" | jq -r '.result.peers')"
    # Sometimes due to machine load or other reasons, we don't get a response from the RPC node
    # If $PEERS is an empty variable, make it 0 so we can still do the comparison
    if [ -z "$PEERS" ]; then
      PEERS=0
    fi
    if [ "$PEERS" -ge $MIN_PEERS ]; then
      echo "[+] $PEERS peers found for $BOOTNODE"
      echo "        Bootnode appears contactable"
      kill $PEZKUWI_PID
      # Delete the temporary chainspec file now we're done running the node
      rm "$TMP_CHAINSPEC_FILE"
      return 0
    fi
    sleep "$TIME_BETWEEN_POLLS"
  done
  kill $PEZKUWI_PID
  # Delete the temporary chainspec file now we're done running the node
  rm "$TMP_CHAINSPEC_FILE"
  echo "[!] No peers found for $BOOTNODE"
  echo "        Bootnode appears unreachable"
  return 1
}
|
||||
|
||||
# Assumes the ENV are set:
|
||||
# - RELEASE_ID
|
||||
# - GITHUB_TOKEN
|
||||
# - REPO in the form pezkuwichain/pezkuwi
|
||||
# NOTE(review): uses bash arrays and pushd/popd although the file's shebang is
# #!/bin/sh — presumably this library is sourced from bash; confirm.
fetch_release_artifacts() {
  echo "Release ID : $RELEASE_ID"
  echo "Repo       : $REPO"
  echo "Binary     : $BINARY"
  OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"}
  echo "OUTPUT_DIR : $OUTPUT_DIR"

  echo "Fetching release info..."
  curl -L -s \
    -H "Accept: application/vnd.github+json" \
    -H "Authorization: Bearer ${GITHUB_TOKEN}" \
    -H "X-GitHub-Api-Version: 2022-11-28" \
    https://api.github.com/repos/${REPO}/releases/${RELEASE_ID} > release.json

  echo "Extract asset ids..."
  ids=($(jq -r '.assets[].id' < release.json ))
  echo "Extract asset count..."
  count=$(jq '.assets|length' < release.json )

  # Fetch artifacts
  mkdir -p "$OUTPUT_DIR"
  pushd "$OUTPUT_DIR" > /dev/null

  echo "Fetching assets..."
  iter=1
  for id in "${ids[@]}"
  do
    echo " - $iter/$count: downloading asset id: $id..."
    # Asset downloads require the octet-stream Accept header.
    curl -s -OJ -L -H "Accept: application/octet-stream" \
      -H "Authorization: Token ${GITHUB_TOKEN}" \
      "https://api.github.com/repos/${REPO}/releases/assets/$id"
    iter=$((iter + 1))
  done

  pwd
  ls -al --color
  popd > /dev/null
}
|
||||
|
||||
# Fetch rpm package from S3.
|
||||
# Fetch rpm package from S3.
# Expects VERSION (git tag) and NODE_VERSION (code version) in the environment.
fetch_rpm_package_from_s3() {
  BINARY=$1
  OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"}

  echo "--- Preparing to fetch RPM package ---"
  echo "Git Tag (VERSION): $VERSION"
  echo "Code Version (NODE_VERSION): $NODE_VERSION"

  URL_BASE=$(get_s3_url_base $BINARY)

  # CORRECTED FILENAME: Changed underscore to hyphen to match the uploaded file.
  FILENAME="${BINARY}-${NODE_VERSION}-1.x86_64.rpm"

  URL="${URL_BASE}/${VERSION}/x86_64-unknown-linux-gnu/${FILENAME}"

  echo "Constructed URL: $URL"
  echo "------------------------------------"

  mkdir -p "$OUTPUT_DIR"
  pushd "$OUTPUT_DIR" > /dev/null

  echo "Fetching rpm package..."

  # --fail makes curl exit non-zero on HTTP errors instead of saving the
  # error page as the artifact.
  curl --fail --progress-bar -LO "$URL"

  echo "Download successful."
  ls -al
  popd > /dev/null
}
|
||||
|
||||
# Fetch deb package from S3. Assumes the ENV are set:
|
||||
# - RELEASE_ID
|
||||
# - GITHUB_TOKEN
|
||||
# - REPO in the form pezkuwichain/pezkuwi
|
||||
# Fetch the Debian package for $1 from S3 into $OUTPUT_DIR.
# Expects VERSION (git tag) and NODE_VERSION (code version) in the environment.
fetch_debian_package_from_s3() {
  BINARY=$1
  echo "Version    : $NODE_VERSION"
  echo "Repo       : $REPO"
  echo "Binary     : $BINARY"
  echo "Tag        : $VERSION"
  OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"}
  echo "OUTPUT_DIR : $OUTPUT_DIR"

  URL_BASE=$(get_s3_url_base $BINARY)
  echo "URL_BASE=$URL_BASE"

  URL=$URL_BASE/$VERSION/x86_64-unknown-linux-gnu/${BINARY}_${NODE_VERSION}_amd64.deb

  mkdir -p "$OUTPUT_DIR"
  pushd "$OUTPUT_DIR" > /dev/null

  echo "Fetching deb package..."

  # Bug fix: `echo "Fetching %s" "$URL"` printed a literal "%s" — echo does
  # not interpret printf format strings.
  echo "Fetching $URL"
  curl --progress-bar -LO "$URL" || echo "Missing $URL"

  pwd
  ls -al --color
  popd > /dev/null

}
|
||||
|
||||
# Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set:
|
||||
# inputs: binary (pezkuwi), target(aarch64-apple-darwin)
|
||||
# Fetch the release binary plus its .sha256 and .asc signature files from S3.
# inputs: binary (pezkuwi), target (aarch64-apple-darwin); VERSION from env.
fetch_release_artifacts_from_s3() {
  BINARY=$1
  TARGET=$2
  OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${TARGET}/${BINARY}"}
  echo "OUTPUT_DIR : $OUTPUT_DIR"

  URL_BASE=$(get_s3_url_base $BINARY)
  echo "URL_BASE=$URL_BASE"

  URL_BINARY=$URL_BASE/$VERSION/$TARGET/$BINARY
  URL_SHA=$URL_BASE/$VERSION/$TARGET/$BINARY.sha256
  URL_ASC=$URL_BASE/$VERSION/$TARGET/$BINARY.asc

  # Fetch artifacts
  mkdir -p "$OUTPUT_DIR"
  pushd "$OUTPUT_DIR" > /dev/null

  echo "Fetching artifacts..."
  for URL in $URL_BINARY $URL_SHA $URL_ASC; do
    # Bug fix: `echo "Fetching %s" "$URL"` printed a literal "%s" — echo does
    # not interpret printf format strings.
    echo "Fetching $URL"
    curl --progress-bar -LO "$URL" || echo "Missing $URL"
  done

  pwd
  ls -al --color
  popd > /dev/null
}
|
||||
|
||||
# Pass the name of the binary as input, it will
|
||||
# return the s3 base url
|
||||
# Map a binary name to its S3 release-bucket base URL; exits 1 on unknown names.
function get_s3_url_base() {
  name=$1
  case $name in
    pezkuwi | pezkuwi-execute-worker | pezkuwi-prepare-worker )
      printf "https://releases.parity.io/pezkuwi"
      ;;

    pezkuwi-teyrchain)
      printf "https://releases.parity.io/pezkuwi-teyrchain"
      ;;

    pezkuwi-omni-node)
      printf "https://releases.parity.io/pezkuwi-omni-node"
      ;;

    chain-spec-builder)
      printf "https://releases.parity.io/chain-spec-builder"
      ;;

    frame-omni-bencher)
      printf "https://releases.parity.io/frame-omni-bencher"
      ;;
    *)
      # NOTE(review): $name is interpolated into printf's format string; a
      # name containing '%' would be misrendered. Harmless for known callers.
      printf "UNSUPPORTED BINARY $name"
      exit 1
      ;;
  esac
}
|
||||
|
||||
|
||||
# Check the checksum for a given binary
|
||||
# Check the checksum for a given binary
function check_sha256() {
  echo "Checking SHA256 for $1"
  # -q: quiet, -c: read expected digest from the adjacent .sha256 file.
  shasum -qc $1.sha256
}
|
||||
|
||||
# Import GPG keys of the release team members
|
||||
# Import GPG keys of the release team members
function import_gpg_keys() {
  GPG_KEYSERVER=${GPG_KEYSERVER:-"hkps://keyserver.ubuntu.com"}
  SEC="9D4B2B6EB8F97156D19669A9FF0812D491B96798"
  EGOR="E6FC4D4782EB0FA64A4903CCDB7D3555DD3932D3"
  MORGAN="2E92A9D8B15D7891363D1AE8AF9E6C43F7F8C4CF"
  PARITY_RELEASES="90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE"
  PARITY_RELEASES_SIGN_COMMITS="D8018FBB3F534D866A45998293C5FB5F6A367B51"

  echo "Importing GPG keys from $GPG_KEYSERVER"
  for key in $SEC $EGOR $MORGAN $PARITY_RELEASES $PARITY_RELEASES_SIGN_COMMITS; do
    (
      echo "Importing GPG key $key"
      gpg --no-tty --quiet --keyserver $GPG_KEYSERVER --recv-keys $key
      # "5" answers the trust prompt with "ultimate", "y" confirms.
      echo -e "5\ny\n" | gpg --no-tty --command-fd 0 --expert --edit-key $key trust;
    )
  done
  wait
  gpg -k
}
|
||||
|
||||
# Check the GPG signature for a given binary
|
||||
# Check the GPG signature for a given binary
function check_gpg() {
  echo "Checking GPG Signature for $1"
  # Verifies the detached signature $1.asc against the file $1.
  gpg --no-tty --verify -q $1.asc $1
}
|
||||
|
||||
# GITHUB_REF will typically be like:
|
||||
# - refs/heads/release-v1.2.3
|
||||
# - refs/heads/release-pezkuwi-v1.2.3-rc2
|
||||
# This function extracts the version
|
||||
# Extract the "vX.Y.Z" version from a GITHUB_REF like
# refs/heads/release-v1.2.3 or refs/heads/release-pezkuwi-v1.2.3-rc2.
function get_version_from_ghref() {
  GITHUB_REF=$1
  stripped=${GITHUB_REF#refs/heads/release-}
  re="v([0-9]+\.[0-9]+\.[0-9]+)"
  if [[ $stripped =~ $re ]]; then
    # BASH_REMATCH[0] is the whole match, i.e. including the leading "v".
    echo ${BASH_REMATCH[0]};
    return 0
  else
    return 1
  fi
}
|
||||
|
||||
# Get latest rc tag based on the release version and product
|
||||
# Get latest rc tag based on the release version and product
function get_latest_rc_tag() {
  version=$1
  product=$2

  # sort -V orders version strings numerically, so the tail is the newest rc.
  if [[ "$product" == "pezkuwi" ]]; then
    last_rc=$(git tag -l "$version-rc*" | sort -V | tail -n 1)
  elif [[ "$product" == "pezkuwi-teyrchain" ]]; then
    last_rc=$(git tag -l "pezkuwi-teyrchains-$version-rc*" | sort -V | tail -n 1)
  fi
  echo "${last_rc}"
}
|
||||
|
||||
# Increment rc tag number based on the value of a suffix of the current rc tag
|
||||
# Increment rc tag number based on the value of a suffix of the current rc tag
function increment_rc_tag() {
  last_rc=$1

  # Trailing digits of the tag, e.g. "3" from "v1.2.0-rc3".
  suffix=$(echo "$last_rc" | grep -Eo '[0-9]+$')
  ((suffix++))
  echo $suffix
}
|
||||
|
||||
# Collapse one trailing "dir/.." component in a path, e.g. a/b/.. -> a.
function relative_parent() {
  echo "$1" | sed -E 's/(.*)\/(.*)\/\.\./\1/g'
}
|
||||
|
||||
# Find all the runtimes, it returns the result as JSON object, compatible to be
|
||||
# used as Github Workflow Matrix. This call is exposed by the `scan` command and can be used as:
|
||||
# podman run --rm -it -v /.../fellowship-runtimes:/build docker.io/chevdor/srtool:1.70.0-0.11.1 scan
|
||||
# Find all the runtimes, it returns the result as JSON object, compatible to be
# used as Github Workflow Matrix.
function find_runtimes() {
  # Every lib.rs that mentions the frame_support runtime macro is a candidate.
  libs=($(git grep -I -r --cached --max-depth 20 --files-with-matches '[frame_support::runtime]!' -- '*lib.rs'))
  re=".*-runtime$"
  JSON=$(jq --null-input '{ "include": [] }')

  # EXCLUDED_RUNTIMES is a space separated list of runtime names (without the -runtime postfix)
  # EXCLUDED_RUNTIMES=${EXCLUDED_RUNTIMES:-"substrate-test"}
  IFS=' ' read -r -a exclusions <<< "$EXCLUDED_RUNTIMES"

  for lib in "${libs[@]}"; do
    crate_dir=$(dirname "$lib")
    cargo_toml="$crate_dir/../Cargo.toml"

    name=$(toml get -r $cargo_toml 'package.name')
    chain=${name//-runtime/}

    # Keep only crates named *-runtime that are not on the exclusion list.
    if [[ "$name" =~ $re ]] && ! [[ ${exclusions[@]} =~ $chain ]]; then
      lib_dir=$(dirname "$lib")
      runtime_dir=$(relative_parent "$lib_dir/..")
      ITEM=$(jq --null-input \
        --arg chain "$chain" \
        --arg name "$name" \
        --arg runtime_dir "$runtime_dir" \
        '{ "chain": $chain, "crate": $name, "runtime_dir": $runtime_dir }')
      JSON=$(echo $JSON | jq ".include += [$ITEM]")
    fi
  done
  echo $JSON
}
|
||||
|
||||
# Filter the version matches the particular pattern and return it.
|
||||
# input: version (v1.8.0 or v1.8.0-rc1)
|
||||
# output: none
|
||||
# Echo the version back if it matches vX.Y.Z / X.Y.Z with optional -rcN;
# otherwise print an error and exit 1.
filter_version_from_input() {
  version=$1
  regex="^(v)?[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?$"

  if [[ $version =~ $regex ]]; then
    echo $version
  else
    echo "Invalid version: $version"
    exit 1
  fi

}
|
||||
|
||||
# Check if the release_id is valid number
|
||||
# input: release_id
|
||||
# output: release_id or exit 1
|
||||
# Check if the release_id is valid number
# input: release_id
# output: release_id or exit 1
check_release_id() {
  input=$1

  # Strip every non-digit character, then require that something remains.
  release_id=$(echo "$input" | sed 's/[^0-9]//g')

  if [[ $release_id =~ ^[0-9]+$ ]]; then
    echo "$release_id"
  else
    echo "Invalid release_id from input: $input"
    exit 1
  fi

}
|
||||
|
||||
# Get latest release tag
|
||||
#
|
||||
# input: none
|
||||
# output: latest_release_tag
|
||||
# Get latest release tag
#
# input: none (uses GITHUB_TOKEN from the environment)
# output: latest_release_tag
get_latest_release_tag() {
  TOKEN="Authorization: Bearer $GITHUB_TOKEN"
  latest_release_tag=$(curl -s -H "$TOKEN" $api_base/pezkuwichain/pezkuwi-sdk/releases/latest | jq -r '.tag_name')
  # Bug fix: use an explicit format string and quote the value — previously
  # the tag itself was the printf format, so a '%' in it (or word splitting)
  # would corrupt the output.
  printf '%s' "$latest_release_tag"
}
|
||||
|
||||
# Print the bare NODE_VERSION value declared in primitives/src/lib.rs
# (quotes and the trailing semicolon removed).
function get_pezkuwi_node_version_from_code() {
    # list all the files with node version, then narrow to the one we need
    git grep -e "\(NODE_VERSION[^=]*= \)\".*\"" |
        grep "primitives/src/lib.rs:" |
        # Print only the version column
        awk '{ print $7 }' |
        # Drop the surrounding quotes and the trailing semicolon in one pass
        sed 's/[";]//g'
}
|
||||
|
||||
# Validate that a tag is a stable-release tag, e.g. stable2407, pezkuwi-stable2412-1
# or pezkuwi-stable2412-rc1; echoes the tag back or exits 1.
validate_stable_tag() {
  tag="$1"
  pattern="^(pezkuwi-)?stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)?$"

  # Guard clause: fail fast on anything that is not a recognised stable tag.
  if [[ ! $tag =~ $pattern ]]; then
    echo "The input '$tag' does not match the pattern."
    exit 1
  fi

  echo $tag
}
|
||||
|
||||
# Prepare docker stable tag from the pezkuwi stable tag
#
# input: tag (pezkuwi-stableYYMM(-X) or pezkuwi-stableYYMM(-X)-rcX)
# output: stableYYMM(-X) or stableYYMM(-X)-rcX
prepare_docker_stable_tag() {
  tag="$1"
  # Guard clause: anything without a stableYYMM core is rejected.
  if [[ ! "$tag" =~ stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)? ]]; then
    echo "Tag is invalid: $tag"
    exit 1
  fi
  # BASH_REMATCH[0] holds the matched stable… substring, i.e. the tag
  # with any "pezkuwi-" prefix stripped.
  echo "${BASH_REMATCH[0]}"
}
|
||||
|
||||
# Parse names of the branches from the github labels based on the pattern
#
# input: labels (array of labels like ("A3-backport" "RO-silent" "A4-backport-stable2407" "A4-backport-stable2503"))
# output: BRANCHES (space-separated branch names)
parse_branch_names_from_backport_labels() {
  labels="$1"
  BRANCHES=""

  for label in $labels; do
    if [[ "$label" =~ ^A4-backport-(stable|unstable)[0-9]{4}$ ]]; then
      # Strip the label prefix to obtain the target branch name.
      BRANCHES+=" ${label#A4-backport-}"
    fi
  done

  # Drop the leading space accumulated by the first append.
  BRANCHES="${BRANCHES# }"
  echo "$BRANCHES"
}
|
||||
|
||||
# Extract the PR number from the PR title
#
# input: PR_TITLE
# output: PR_NUMBER, or exit 1 if the PR title does not contain the PR number
extract_pr_number_from_pr_title() {
  PR_TITLE=$1
  # Guard clause: the title must contain '#<digits>'.
  if [[ ! "$PR_TITLE" =~ \#([0-9]+) ]]; then
    echo "⚠️ The PR title does not contain original PR number. PR title should be in form: [stableBranchName] Backport #originalPRNumber"
    exit 1
  fi
  echo "${BASH_REMATCH[1]}"
}
|
||||
@@ -0,0 +1,49 @@
|
||||
"""
|
||||
Script to deny Git dependencies in the Cargo workspace. Can be passed one optional argument for the
|
||||
root folder. If not provided, it will use the cwd.
|
||||
|
||||
## Usage
|
||||
python3 .github/scripts/deny-git-deps.py pezkuwi-sdk
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from cargo_workspace import Workspace, DependencyLocation
|
||||
|
||||
# Some crates are allowed to have git dependencies until we fix them.
# Maps dependency name -> list of crate names that may pull it from git.
ALLOWED_GIT_DEPS = {
    'subwasmlib': ['pezkuwi-zombienet-sdk-tests'],
}

# Workspace root: first CLI argument, falling back to the current directory.
root = sys.argv[1] if len(sys.argv) > 1 else os.getcwd()
workspace = Workspace.from_path(root)
# Collected violation messages; the script prints them and exits 1 at the end.
errors = []
|
||||
|
||||
def check_dep(dep, used_by):
    """Record an error if `dep` is pulled from git, unless allow-listed.

    Non-git dependencies are ignored; allow-listed git dependencies are
    only logged, everything else is appended to the module-level `errors`.
    """
    if dep.location != DependencyLocation.GIT:
        return

    if used_by in ALLOWED_GIT_DEPS.get(dep.name, []):
        print(f'🤨 Ignoring git dependency {dep.name} in {used_by}')
        return

    errors.append(f'🚫 Found git dependency {dep.name} in {used_by}')
|
||||
|
||||
# Check the workspace dependencies that can be inherited:
for dep in workspace.dependencies:
    check_dep(dep, "workspace")

    # A dependency that is itself a workspace crate must be linked by path,
    # not pulled from a registry or git.
    if workspace.crates.find_by_name(dep.name):
        if dep.location != DependencyLocation.PATH:
            errors.append(f'🚫 Workspace must use path to link local dependency {dep.name}')

# And the dependencies of each crate:
for crate in workspace.crates:
    for dep in crate.dependencies:
        check_dep(dep, crate.name)

# Report every collected violation and fail the CI job if any were found.
if errors:
    print('❌ Found errors:')
    for error in errors:
        print(error)
    sys.exit(1)
|
||||
+123
@@ -0,0 +1,123 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Zombienet Workflow Dispatcher
|
||||
#
|
||||
# This script triggers GitHub Actions workflows for zombienet tests and monitors their execution.
|
||||
# It can run workflows multiple times for reliability testing and optionally filter tests by pattern.
|
||||
# Results are automatically saved to a timestamped CSV file for analysis.
|
||||
#
|
||||
# Features:
|
||||
# - Trigger workflows on specific branches
|
||||
# - Filter tests by pattern (useful for debugging specific tests)
|
||||
# - Run workflows multiple times for flaky test detection
|
||||
# - Monitor workflow completion and collect results
|
||||
# - Export results to CSV with job details (ID, name, conclusion, timing, URLs)
|
||||
#
|
||||
# Requirements:
|
||||
# - GitHub CLI (gh) must be installed and authenticated
|
||||
# - Must be run from pezkuwi-sdk repository root
|
||||
# - Target branch must have corresponding PR with CI enabled
|
||||
|
||||
# Exit on error
|
||||
# set -e
|
||||
|
||||
# Print all arguments prefixed with a "YYYY-mm-dd HH:MM:SS - " timestamp.
function dbg {
    local msg="$@"
    printf "%s - %s\n" "$(date "+%Y-%m-%d %T")" "$msg"
}
|
||||
|
||||
# Append one CSV row per completed 'zombienet-*' job of the given run.
# args: run_id, branch, csv_file
function write_job_results_to_csv {
    local run_id="$1"
    local branch="$2"
    local csv_file="$3"

    dbg "Writing job results for run $run_id to $csv_file"

    # Only jobs whose name starts with 'zombienet-' and which finished with a
    # definitive conclusion (success/failure) are exported.
    local jq_filter='.jobs[] | select(.name | startswith("zombienet-")) |
    select(.conclusion == "success" or .conclusion == "failure") |
    [.databaseId, .name, .conclusion, .startedAt, "'"$branch"'", .url] | @csv'

    gh run view "$run_id" --json jobs --jq "$jq_filter" >> "$csv_file"
}
|
||||
|
||||
# Parse command line arguments
WORKFLOW_FILE=""
BRANCH=""
MAX_RESULT_CNT=-1
TEST_PATTERN=""

while getopts "w:b:m:p:h" opt; do
  case $opt in
    w) WORKFLOW_FILE="$OPTARG" ;;
    b) BRANCH="$OPTARG" ;;
    m) MAX_RESULT_CNT="$OPTARG" ;;
    p) TEST_PATTERN="$OPTARG" ;;
    h) echo "Usage: $0 -w <workflow-file> -b <branch> [-m max-triggers] [-p test-pattern]"
       echo "  -w: Workflow file (required)"
       echo "  -b: Branch name (required)"
       echo "  -m: Maximum number of triggers (optional, default: infinite)"
       echo "  -p: Test pattern for workflow input (optional)"
       exit 0 ;;
    \?) echo "Invalid option -$OPTARG" >&2
        echo "Use -h for help"
        exit 1 ;;
  esac
done

if [[ -z "$WORKFLOW_FILE" || -z "$BRANCH" ]]; then
  echo "Error: Both workflow file (-w) and branch (-b) are required"
  echo "Usage: $0 -w <workflow-file> -b <branch> [-m max-triggers] [-p test-pattern]"
  echo "Use -h for help"
  exit 1
fi

# Create CSV file with headers
CSV_FILE="workflow_results_$(date +%Y%m%d_%H%M%S).csv"
echo "job_id,job_name,conclusion,started_at,branch,job_url" > "$CSV_FILE"
dbg "Created CSV file: $CSV_FILE"

dbg "Starting loop for workflow: $WORKFLOW_FILE on branch: $BRANCH"

TRIGGER_CNT=0
RESULT_CNT=0

# Keep triggering until we have collected MAX_RESULT_CNT results
# (-1 means run forever).
while [[ $MAX_RESULT_CNT -eq -1 || $RESULT_CNT -lt $MAX_RESULT_CNT ]]; do

  dbg "Waiting until workflow $WORKFLOW_FILE (branch: $BRANCH) jobs are completed"

  while true ; do
    echo ""
    # NOTE: workflow/branch arguments are quoted so names containing spaces
    # or glob characters are not word-split (the original left them bare).
    gh run list --workflow="$WORKFLOW_FILE" -e workflow_dispatch -b "$BRANCH" -L 5
    sleep 2
    # if job is completed it should have non-empty conclusion field
    ALL_JOBS_COMPLETED=$(gh run list --workflow="$WORKFLOW_FILE" -e workflow_dispatch -b "$BRANCH" --json conclusion --jq 'all(.[]; .conclusion != "")')
    if [[ "$ALL_JOBS_COMPLETED" == "true" ]]; then
      break
    fi
    sleep 60
  done
  dbg "Workflow $WORKFLOW_FILE (branch: $BRANCH) jobs completed"

  # Skip the first iteration - latest run id is not the one we triggered here
  if [ $TRIGGER_CNT -gt 0 ]; then
    # Get the most recent completed run ID and write job results to CSV
    LATEST_RUN_ID=$(gh run list --workflow="$WORKFLOW_FILE" -e workflow_dispatch -b "$BRANCH" -L 1 --json databaseId --jq '.[0].databaseId')
    write_job_results_to_csv "$LATEST_RUN_ID" "$BRANCH" "$CSV_FILE"
    RESULT_CNT=$(( RESULT_CNT + 1 ))
  fi

  TRIGGER_CNT=$(( TRIGGER_CNT + 1 ))
  dbg "Triggering #$TRIGGER_CNT workflow $WORKFLOW_FILE (branch: $BRANCH)"

  # Only pass the test_pattern input when one was requested.
  if [[ -n "$TEST_PATTERN" ]]; then
    gh workflow run "$WORKFLOW_FILE" --ref "$BRANCH" -f test_pattern="$TEST_PATTERN"
  else
    gh workflow run "$WORKFLOW_FILE" --ref "$BRANCH"
  fi

  dbg "Sleeping 60s"
  sleep 60
done
|
||||
|
||||
@@ -0,0 +1,156 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Generate the PrDoc for a Pull Request with a specific number, audience and bump level.
|
||||
|
||||
It downloads and parses the patch from the GitHub API to populate the prdoc with all modified crates.
|
||||
This will delete any prdoc that already exists for the PR if `--force` is passed.
|
||||
|
||||
Usage:
|
||||
python generate-prdoc.py --pr 1234 --audience node_dev --bump patch
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import toml
|
||||
import yaml
|
||||
import requests
|
||||
|
||||
from github import Github
|
||||
import whatthepatch
|
||||
from cargo_workspace import Workspace
|
||||
|
||||
def from_pr_number(n, audience, bump, force):
    """Fetch PR `n` from GitHub, download its patch and create the prdoc."""
    print(f"Fetching PR '{n}' from GitHub")
    gh = Github()

    pull = gh.get_repo("pezkuwichain/pezkuwi-sdk").get_pull(n)
    patch_text = requests.get(pull.patch_url).text

    create_prdoc(n, audience, pull.title, pull.body, patch_text, bump, force)
|
||||
|
||||
def translate_audience(audience):
    """Map snake_case audience aliases to their human-readable prdoc names.

    A single-element list collapses to a scalar; unknown aliases map to None.
    """
    aliases = {
        'runtime_dev': 'Runtime Dev',
        'runtime_user': 'Runtime User',
        'node_dev': 'Node Dev',
        'node_operator': 'Node Operator',
        'todo': 'Todo',
    }

    mapped = list(map(aliases.get, audience))
    if len(mapped) == 1:
        mapped = mapped[0]

    print(f"Translated audience '{audience}' to '{mapped}'")
    return mapped
|
||||
|
||||
def create_prdoc(pr, audience, title, description, patch, bump, force):
    """Write `prdoc/pr_<pr>.prdoc` for the given PR.

    Args:
        pr: PR number; output goes to `prdoc/pr_<pr>.prdoc`.
        audience: Audience alias(es); translated via `translate_audience`.
        title: PR title, used as the prdoc title.
        description: PR body, used as the doc description.
        patch: Unified-diff text of the PR; parsed to find modified crates.
        bump: Semver bump level recorded for each modified, published crate.
        force: Overwrite an existing prdoc instead of aborting.
    """
    path = f"prdoc/pr_{pr}.prdoc"

    if os.path.exists(path):
        if force:
            print(f"Overwriting existing PrDoc for PR {pr}")
        else:
            print(f"PrDoc already exists for PR {pr}. Use --force to overwrite.")
            sys.exit(1)
    else:
        print(f"No preexisting PrDoc for PR {pr}")

    prdoc = { "title": title, "doc": [{}], "crates": [] }
    audience = translate_audience(audience)

    prdoc["doc"][0]["audience"] = audience
    prdoc["doc"][0]["description"] = description

    workspace = Workspace.from_path(".")

    # Collect every path touched by the patch.
    modified_paths = []
    for diff in whatthepatch.parse_patch(patch):
        new_path = diff.header.new_path
        # Sometimes this lib returns `/dev/null` as the new path...
        if not new_path.startswith("/dev"):
            modified_paths.append(new_path)

    # Map each modified path to its owning (published) crate.
    modified_crates = {}
    for p in modified_paths:
        # Go up until we find a Cargo.toml
        p = os.path.join(workspace.path, p)
        while not os.path.exists(os.path.join(p, "Cargo.toml")):
            if p == '/':
                # Walked all the way to the filesystem root without finding a
                # manifest - fail loudly instead of exiting silently.
                print(f"Could not find a Cargo.toml for a modified path; gave up at: {p}")
                sys.exit(1)
            p = os.path.dirname(p)

        with open(os.path.join(p, "Cargo.toml")) as f:
            manifest = toml.load(f)

        # Virtual manifests (workspace-only) have no [package] section.
        if not "package" in manifest:
            continue

        crate_name = manifest["package"]["name"]
        if workspace.crate_by_name(crate_name).publish:
            modified_crates[crate_name] = True
        else:
            print(f"Skipping unpublished crate: {crate_name}")

    for crate_name in modified_crates.keys():
        entry = { "name": crate_name }

        # These bump levels disable semver validation instead of recording a bump.
        if bump in ('silent', 'ignore', 'no change'):
            entry["validate"] = False
        else:
            entry["bump"] = bump

        print(f"Adding crate {entry}")
        prdoc["crates"].append(entry)

    # write the parsed PR documentation back to the file
    with open(path, "w") as f:
        yaml.dump(prdoc, f, sort_keys=False)
    print(f"PrDoc for PR {pr} written to {path}")
|
||||
|
||||
# Make the `description` a multiline string instead of escaping \r\n.
def setup_yaml():
    """Register a str presenter so multiline values dump as `|` blocks."""
    def representer(dumper, text):
        if len(text.splitlines()) > 1:
            cleaned = '\n'.join(line.rstrip() for line in text.strip().splitlines())
            return dumper.represent_scalar('tag:yaml.org,2002:str', cleaned, style='|')
        return dumper.represent_scalar('tag:yaml.org,2002:str', text)

    yaml.add_representer(str, representer)
|
||||
|
||||
# parse_args is also used by cmd/cmd.py
# if pr_required is False, then --pr is optional, as it can be derived from the PR comment body
def setup_parser(parser=None, pr_required=True):
    """Build (or extend) the argparse parser for prdoc generation."""
    allowed_audiences = ["runtime_dev", "runtime_user", "node_dev", "node_operator", "todo"]

    # Create a fresh parser only when the caller did not supply one.
    if parser is None:
        parser = argparse.ArgumentParser()

    parser.add_argument("--pr", type=int, required=pr_required, help="The PR number to generate the PrDoc for.")
    parser.add_argument("--audience", type=str, nargs='*', choices=allowed_audiences, default=["todo"], help="The audience of whom the changes may concern. Example: --audience runtime_dev node_dev")
    parser.add_argument("--bump", type=str, default="major", choices=["patch", "minor", "major", "silent", "ignore", "none"], help="A default bump level for all crates. Example: --bump patch")
    parser.add_argument("--force", action="store_true", help="Whether to overwrite any existing PrDoc.")

    return parser
|
||||
|
||||
def snake_to_title(s):
    """Convert a snake_case string to Title Case, e.g. 'node_dev' -> 'Node Dev'."""
    words = s.split('_')
    return ' '.join(map(str.capitalize, words))
|
||||
|
||||
def main(args):
    """Generate the prdoc for `args.pr`; return 0 on success, 1 on failure."""
    print(f"Args: {args}, force: {args.force}")
    setup_yaml()
    try:
        from_pr_number(args.pr, args.audience, args.bump, args.force)
    except Exception as e:
        # Any failure is reported as a non-zero result instead of a traceback.
        print(f"Error generating prdoc: {e}")
        return 1
    return 0
|
||||
|
||||
if __name__ == "__main__":
    parser = setup_parser()
    args = parser.parse_args()
    # Propagate main()'s result as the process exit code so CI can detect
    # failures; previously the return value was silently discarded and the
    # script always exited 0.
    sys.exit(main(args))
|
||||
@@ -0,0 +1,6 @@
|
||||
requests
|
||||
cargo-workspace
|
||||
PyGithub
|
||||
whatthepatch
|
||||
pyyaml
|
||||
toml
|
||||
Executable
+136
@@ -0,0 +1,136 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
A script to generate READMEs for all public crates,
|
||||
if they do not already have one.
|
||||
|
||||
It relies on functions from the `check-workspace.py` script.
|
||||
|
||||
The resulting README is based on a template defined below,
|
||||
and includes the crate name, description, license,
|
||||
and optionally - the SDK release version.
|
||||
|
||||
# Example
|
||||
|
||||
```sh
|
||||
python3 -m pip install toml
|
||||
.github/scripts/generate-readmes.py . --sdk-version 1.15.0
|
||||
```
|
||||
"""
|
||||
|
||||
import os
|
||||
import toml
|
||||
import importlib
|
||||
import argparse
|
||||
|
||||
check_workspace = importlib.import_module("check-workspace")
|
||||
|
||||
README_TEMPLATE = """<div align="center">
|
||||
|
||||
<img src="https://raw.githubusercontent.com/pezkuwichain/pezkuwi-sdk/master/docs/images/Pezkuwi_Logo.png" alt="Pezkuwi logo" width="200">
|
||||
|
||||
# {name}
|
||||
|
||||
This crate is part of the [Pezkuwi SDK](https://github.com/pezkuwichain/pezkuwi-sdk/).
|
||||
|
||||
</div>
|
||||
|
||||
## Description
|
||||
|
||||
{description}
|
||||
|
||||
## Additional Resources
|
||||
|
||||
In order to learn about Pezkuwi SDK, head over to the [Pezkuwi SDK Developer Documentation](https://pezkuwichain.github.io/pezkuwi-sdk/master/pezkuwi_sdk_docs/index.html).
|
||||
|
||||
To learn about Pezkuwi, visit [pezkuwichain.io](https://pezkuwichain.io/).
|
||||
|
||||
## License
|
||||
|
||||
This crate is licensed with {license}.
|
||||
"""
|
||||
|
||||
VERSION_TEMPLATE = """
|
||||
## Version
|
||||
|
||||
This version of `{name}` is associated with Pezkuwi {sdk_version} release.
|
||||
"""
|
||||
|
||||
|
||||
def generate_readme(member, *, workspace_dir, workspace_license, sdk_version):
    """Create a README.md for `member` unless it is unpublished or already has one.

    The README is rendered from README_TEMPLATE (plus VERSION_TEMPLATE when
    `sdk_version` is given) using the crate's Cargo.toml metadata.
    """
    print(f"Loading manifest for: {member}")
    crate_dir = os.path.join(workspace_dir, member)
    manifest = toml.load(os.path.join(crate_dir, "Cargo.toml"))

    # Skip crates that opt out of publishing or already ship a README.
    if manifest["package"].get("publish", True) == False:
        print(f"⏩ Skipping un-published crate: {member}")
        return
    if os.path.exists(os.path.join(crate_dir, "README.md")):
        print(f"⏩ Skipping crate with an existing readme: {member}")
        return
    print(f"📝 Generating README for: {member}")

    license = manifest["package"]["license"]
    if isinstance(license, dict):
        if not license.get("workspace", False):
            print(
                f"❌ License for {member} is unexpectedly declared as workspace=false."
            )
            # Skipping this crate as it is not clear what license it should use.
            return
        license = workspace_license

    name = manifest["package"]["name"]
    description = manifest["package"]["description"]
    # Make sure the description ends in a full stop.
    if not description.endswith("."):
        description = description + "."

    filled_readme = README_TEMPLATE.format(
        name=name, description=description, license=license
    )
    if sdk_version:
        filled_readme += VERSION_TEMPLATE.format(name=name, sdk_version=sdk_version)

    with open(os.path.join(crate_dir, "README.md"), "w") as new_readme:
        new_readme.write(filled_readme)
|
||||
|
||||
|
||||
def parse_args():
    """Parse CLI arguments; return (workspace_dir, sdk_version_or_None)."""
    parser = argparse.ArgumentParser(
        description="Generate readmes for published crates."
    )

    parser.add_argument(
        "workspace_dir",
        help="The directory to check",
        metavar="workspace_dir",
        type=str,
        nargs=1,
    )
    parser.add_argument(
        "--sdk-version",
        help="Optional SDK release version",
        metavar="sdk_version",
        type=str,
        nargs=1,
        required=False,
    )

    args = parser.parse_args()
    # Both options use nargs=1, so unwrap the single-element lists here.
    sdk_version = args.sdk_version[0] if args.sdk_version else None
    return (args.workspace_dir[0], sdk_version)
|
||||
|
||||
|
||||
def main():
    """Generate a README for every published workspace member lacking one."""
    (workspace_dir, sdk_version) = parse_args()

    # The workspace-level license is inherited by crates declaring
    # `license.workspace = true`.
    root_manifest = toml.load(os.path.join(workspace_dir, "Cargo.toml"))
    workspace_license = root_manifest["workspace"]["package"]["license"]

    for member in check_workspace.get_members(workspace_dir, []):
        generate_readme(
            member,
            workspace_dir=workspace_dir,
            workspace_license=workspace_license,
            sdk_version=sdk_version,
        )


if __name__ == "__main__":
    main()
|
||||
@@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Zombienet Test Matrix Parser
|
||||
|
||||
This script parses YAML test definition files and converts them to JSON format
|
||||
for use as GitHub Actions matrix jobs. It provides filtering capabilities to:
|
||||
|
||||
1. Exclude flaky tests (unless a specific test pattern is provided)
|
||||
2. Filter tests by name pattern for targeted execution
|
||||
3. Convert YAML test definitions to JSON matrix format
|
||||
|
||||
The script is used by GitHub Actions workflows to dynamically generate
|
||||
test matrices based on YAML configuration files, enabling flexible
|
||||
test execution and maintenance.
|
||||
|
||||
Usage:
|
||||
python parse-zombienet-tests.py --matrix tests.yml [--flaky-tests flaky.txt] [--test-pattern pattern]
|
||||
|
||||
Output:
|
||||
JSON array of test job objects suitable for GitHub Actions matrix strategy
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import yaml
|
||||
import json
|
||||
import re
|
||||
|
||||
def parse_args():
    """Parse CLI options: --matrix (required), --flaky-tests, --test-pattern."""
    parser = argparse.ArgumentParser(
        description="Parse test matrix YAML file with optional filtering"
    )
    parser.add_argument("--matrix", required=True,
                        help="Path to the YAML matrix file")
    parser.add_argument("--flaky-tests", default="",
                        help="Newline-separated list of flaky job names")
    parser.add_argument("--test-pattern", default="",
                        help="Regex pattern to match job_name")
    return parser.parse_args()
|
||||
|
||||
def load_jobs(matrix_path):
    """Load and return the list of job dicts from the YAML matrix file."""
    with open(matrix_path) as matrix_file:
        return yaml.safe_load(matrix_file)
|
||||
|
||||
def filter_jobs(jobs, flaky_tests, test_pattern):
    """Filter matrix jobs.

    When `test_pattern` is non-empty it overrides flaky-test exclusion and
    only jobs whose "job-name" matches the regex are kept. Otherwise jobs
    whose name appears in `flaky_tests` (newline-separated) are dropped.
    """
    flaky_set = {name.strip() for name in flaky_tests.splitlines() if name.strip()}

    # `test_pattern and len(test_pattern) > 0` was redundant - a non-empty
    # string is already truthy.
    if test_pattern:
        return [job for job in jobs if re.search(test_pattern, job.get("job-name", ""))]
    return [job for job in jobs if job.get("job-name", "") not in flaky_set]
|
||||
|
||||
def main():
    """Print the filtered job matrix as a JSON array for GitHub Actions."""
    args = parse_args()
    matrix = filter_jobs(load_jobs(args.matrix), args.flaky_tests, args.test_pattern)
    print(json.dumps(matrix))


if __name__ == "__main__":
    main()
|
||||
@@ -0,0 +1,259 @@
|
||||
"""
|
||||
This script is used to turn the JSON report produced by the revive differential tests tool into an
|
||||
easy to consume markdown document for the purpose of reporting this information in the Pezkuwi SDK
|
||||
CI. The full models used in the JSON report can be found in the revive differential tests repo and
|
||||
the models used in this script are just a partial reproduction of the full report models.
|
||||
"""
|
||||
|
||||
import json, typing, io, sys
|
||||
|
||||
|
||||
class Report(typing.TypedDict):
    """Top-level model of the JSON report (partial reproduction, see module docstring)."""
    context: "Context"
    execution_information: dict["MetadataFilePathString", "MetadataFileReport"]


class MetadataFileReport(typing.TypedDict):
    """Per-metadata-file report: one CaseReport per case index."""
    case_reports: dict["CaseIdxString", "CaseReport"]


class CaseReport(typing.TypedDict):
    """Per-case report: one ExecutionReport per compilation mode."""
    mode_execution_reports: dict["ModeString", "ExecutionReport"]


class ExecutionReport(typing.TypedDict):
    """Result of executing one case in one mode."""
    status: "TestCaseStatus"


class Context(typing.TypedDict):
    """Report context; only the "Test" variant is modeled here."""
    Test: "TestContext"


class TestContext(typing.TypedDict):
    """Test-run context carrying the corpus configuration."""
    corpus_configuration: "CorpusConfiguration"


class CorpusConfiguration(typing.TypedDict):
    """Corpus configuration; only the list of test specifiers is used."""
    test_specifiers: list["TestSpecifier"]


class CaseStatusSuccess(typing.TypedDict):
    """Status payload for a case that succeeded."""
    status: typing.Literal["Succeeded"]
    steps_executed: int


class CaseStatusFailure(typing.TypedDict):
    """Status payload for a case that failed, with the failure reason."""
    status: typing.Literal["Failed"]
    reason: str


class CaseStatusIgnored(typing.TypedDict):
    """Status payload for a case that was ignored, with the reason."""
    status: typing.Literal["Ignored"]
    reason: str


TestCaseStatus = typing.Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
"""A union type of all of the possible statuses that could be reported for a case."""

TestSpecifier = str
"""A test specifier string. For example resolc-compiler-tests/fixtures/solidity/test.json::0::Y+"""

ModeString = str
"""The mode string. For example Y+ >=0.8.13"""

MetadataFilePathString = str
"""The path to a metadata file. For example resolc-compiler-tests/fixtures/solidity/test.json"""

CaseIdxString = str
"""The index of a case as a string. For example '0'"""

PlatformString = typing.Union[
    typing.Literal["revive-dev-node-revm-solc"],
    typing.Literal["revive-dev-node-polkavm-resolc"],
]
"""A string of the platform on which the test was run"""
|
||||
|
||||
|
||||
def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
    """
    Given a path, this function returns the path relative to the
    resolc-compiler-test directory. Example:

    Input: ~/pezkuwi-sdk/revive-differential-tests/resolc-compiler-tests/fixtures/solidity/test.json
    Output: test.json

    Paths that do not contain the marker directory are returned with
    surrounding slashes stripped.
    """
    marker = "resolc-compiler-tests/fixtures/solidity"
    tail = path.split(marker)[-1]
    return tail.strip("/")
|
||||
|
||||
|
||||
def main() -> None:
    """Convert the JSON report at argv[1], for the platform named by argv[2],
    into a markdown summary written to ./report.md."""
    with open(sys.argv[1], "r") as file:
        report: Report = json.load(file)

    # Getting the platform string and resolving it into a simpler version of
    # itself.
    platform_identifier: PlatformString = typing.cast(PlatformString, sys.argv[2])
    if platform_identifier == "revive-dev-node-polkavm-resolc":
        platform: str = "PolkaVM"
    elif platform_identifier == "revive-dev-node-revm-solc":
        platform: str = "REVM"
    else:
        # Unknown identifiers are passed through unchanged.
        platform: str = platform_identifier

    # Starting the markdown document and adding information to it as we go.
    markdown_document: io.TextIOWrapper = open("report.md", "w")
    print(f"# Differential Tests Results ({platform})", file=markdown_document)

    # Getting all of the test specifiers from the report and making them relative to the tests dir.
    test_specifiers: list[str] = list(
        map(
            path_relative_to_resolc_compiler_test_directory,
            report["context"]["Test"]["corpus_configuration"]["test_specifiers"],
        )
    )
    print("## Specified Tests", file=markdown_document)
    for test_specifier in test_specifiers:
        print(f"* ``{test_specifier}``", file=markdown_document)

    # Counting the total number of test cases, successes, failures, and ignored tests
    total_number_of_cases: int = 0
    total_number_of_successes: int = 0
    total_number_of_failures: int = 0
    total_number_of_ignores: int = 0
    for _, mode_to_case_mapping in report["execution_information"].items():
        for _, case_idx_to_report_mapping in mode_to_case_mapping[
            "case_reports"
        ].items():
            for _, execution_report in case_idx_to_report_mapping[
                "mode_execution_reports"
            ].items():
                status: TestCaseStatus = execution_report["status"]

                total_number_of_cases += 1
                if status["status"] == "Succeeded":
                    total_number_of_successes += 1
                elif status["status"] == "Failed":
                    total_number_of_failures += 1
                elif status["status"] == "Ignored":
                    total_number_of_ignores += 1
                else:
                    raise Exception(
                        f"Encountered a status that's unknown to the script: {status}"
                    )

    print("## Counts", file=markdown_document)
    print(
        f"* **Total Number of Test Cases:** {total_number_of_cases}",
        file=markdown_document,
    )
    print(
        f"* **Total Number of Successes:** {total_number_of_successes}",
        file=markdown_document,
    )
    print(
        f"* **Total Number of Failures:** {total_number_of_failures}",
        file=markdown_document,
    )
    print(
        f"* **Total Number of Ignores:** {total_number_of_ignores}",
        file=markdown_document,
    )

    # Grouping the various test cases into dictionaries and groups depending on their status to make
    # them easier to include in the markdown document later on.
    successful_cases: dict[
        MetadataFilePathString, dict[CaseIdxString, set[ModeString]]
    ] = {}
    for metadata_file_path, mode_to_case_mapping in report[
        "execution_information"
    ].items():
        for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[
            "case_reports"
        ].items():
            for mode_string, execution_report in case_idx_to_report_mapping[
                "mode_execution_reports"
            ].items():
                status: TestCaseStatus = execution_report["status"]
                # NOTE: this rebinds the loop variable to the shortened path form
                # for the remainder of the iteration.
                metadata_file_path: str = (
                    path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                )
                mode_string: str = mode_string.replace(" M3", "+").replace(" M0", "-")

                if status["status"] == "Succeeded":
                    successful_cases.setdefault(
                        metadata_file_path,
                        {},
                    ).setdefault(
                        case_idx_string, set()
                    ).add(mode_string)

    print("## Failures", file=markdown_document)
    print(
        "The test specifiers seen in this section have the format 'path::case_idx::compilation_mode'\
 and they're compatible with the revive differential tests framework and can be specified\
 to it directly in the same way that they're provided through the `--test` argument of the\
 framework.\n",
        file=markdown_document,
    )
    print(
        "The failures are provided in an expandable section to ensure that the PR does not get \
polluted with information. Please click on the section below for more information",
        file=markdown_document,
    )
    print(
        "<details><summary>Detailed Differential Tests Failure Information</summary>\n\n",
        file=markdown_document,
    )
    print("| Test Specifier | Failure Reason | Note |", file=markdown_document)
    print("| -- | -- | -- |", file=markdown_document)

    for metadata_file_path, mode_to_case_mapping in report[
        "execution_information"
    ].items():
        for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[
            "case_reports"
        ].items():
            for mode_string, execution_report in case_idx_to_report_mapping[
                "mode_execution_reports"
            ].items():
                status: TestCaseStatus = execution_report["status"]
                metadata_file_path: str = (
                    path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                )
                mode_string: str = mode_string.replace(" M3", "+").replace(" M0", "-")

                if status["status"] != "Failed":
                    continue

                # Markdown table cells must not contain newlines or pipes.
                failure_reason: str = (
                    status["reason"].replace("\n", " ").replace("|", " ")
                )

                note: str = ""
                modes_where_this_case_succeeded: set[ModeString] = (
                    successful_cases.setdefault(
                        metadata_file_path,
                        {},
                    ).setdefault(case_idx_string, set())
                )
                if len(modes_where_this_case_succeeded) != 0:
                    note: str = (
                        f"This test case succeeded with other compilation modes: {modes_where_this_case_succeeded}"
                    )

                test_specifier: str = (
                    f"{metadata_file_path}::{case_idx_string}::{mode_string}"
                )
                print(
                    f"| ``{test_specifier}`` | ``{failure_reason}`` | {note} |",
                    file=markdown_document,
                )
    print("\n\n</details>", file=markdown_document)

    # The primary downside of not using `with`, but I guess it's better since I don't want to over
    # indent the code.
    markdown_document.close()


if __name__ == "__main__":
    main()
|
||||
Executable
+213
@@ -0,0 +1,213 @@
|
||||
#!/bin/bash
set -euo pipefail

# This script processes logs produced by nodes spawned using the zombienet-sdk framework.
# The logs are prepared for upload as GitHub artifacts.
# If Loki logging is available, the corresponding log URLs are also printed.
# NOTE: P2838773B5F7DE937 is the loki.cicd until we switch to loki.zombienet
# Grafana "explore" URL template. The {{namespace}}, {{podName}}, {{from}} and
# {{to}} placeholders are substituted by make_url below.
LOKI_URL_FOR_NODE='https://grafana.teleport.parity.io/explore?orgId=1&left=%7B%22datasource%22:%22P2838773B5F7DE937%22,%22queries%22:%5B%7B%22refId%22:%22A%22,%22datasource%22:%7B%22type%22:%22loki%22,%22uid%22:%22P2838773B5F7DE937%22%7D,%22editorMode%22:%22code%22,%22expr%22:%22%7Bzombie_ns%3D%5C%22{{namespace}}%5C%22,zombie_node%3D%5C%22{{podName}}%5C%22%7D%22,%22queryType%22:%22range%22%7D%5D,%22range%22:%7B%22from%22:%22{{from}}%22,%22to%22:%22{{to}}%22%7D%7D'

# Directory watched by the log forwarder; when it exists, native-provider logs
# are appended to to-loki.log inside it for ingestion.
LOKI_DIR_FOR_NATIVE_LOGS="/tmp/zombienet"

# JQ queries — node-name extraction differs between zombienet v1 and the SDK
# network-description layouts.
JQ_QUERY_RELAY_V1='.relay[].name'
JQ_QUERY_RELAY_SDK='.relay.nodes[].name'

JQ_QUERY_PARA_NODES_V1='.paras[$pid].nodes[].name'
JQ_QUERY_PARA_NODES_SDK='.teyrchains[$pid][] .collators[].name'

# current time in milliseconds + 60 secs to allow loki to ingest logs
TO=$(($(date +%s%3N) + 60000))
|
||||
|
||||
# Render the Loki "explore" URL for one node by filling in the template.
#
# Globals read: LOKI_URL_FOR_NODE (template), NS (namespace), FROM (range start, ms).
# Args: $1 = pod/node name, $2 = range end in milliseconds.
# Output: the rendered URL on stdout.
make_url() {
    local node_name="$1"
    local range_end="$2"
    local rendered="$LOKI_URL_FOR_NODE"
    rendered="${rendered//\{\{namespace\}\}/$NS}"
    rendered="${rendered//\{\{podName\}\}/$node_name}"
    rendered="${rendered//\{\{from\}\}/$FROM}"
    rendered="${rendered//\{\{to\}\}/$range_end}"
    echo "$rendered"
}
|
||||
|
||||
# Since we don't have the zombie.json file, we will make the best-effort to send the logs.
# Discovers log files by glob (SDK layout: BASE_DIR/<name>/<name>.log; v1 layout:
# BASE_DIR/logs/<name>.log), copies them into TARGET_DIR, forwards them to Loki
# when the forwarder directory exists, and prints one Loki URL per node.
#
# Args: $1 = BASE_DIR (e.g. /tmp/zombie-abc123), $2 = TARGET_DIR (artifact dir).
# Globals set: NS, FROM (consumed by make_url).
process_logs_from_fallback() {
    local BASE_DIR="$1"
    local TARGET_DIR="$2"

    # Extract namespace from BASE_DIR (e.g., /tmp/zombie-abc123 -> zombie-abc123)
    NS=$(basename "$BASE_DIR")
    echo "Using fallback mode for namespace: $NS"

    # Use current time as FROM since we don't have zombie.json
    FROM=$(($(date +%s%3N) - 600000)) # 10 minutes ago

    # Find all logs with glob patterns
    local log_files=()

    # Search for SDK pattern: BASE_DIR/<name>/<name>.log
    if [[ -d "$BASE_DIR" ]]; then
        for node_dir in "$BASE_DIR"/*; do
            # skip TARGET_DIR itself so already-copied logs are not re-collected
            if [[ -d "$node_dir" && "$node_dir" != "$TARGET_DIR" ]]; then
                local node_name=$(basename "$node_dir")
                if [[ -f "$node_dir/$node_name.log" ]]; then
                    log_files+=("$node_dir/$node_name.log")
                fi
            fi
        done
    fi

    # Search for v1 pattern: BASE_DIR/logs/<name>.log
    if [[ -d "$TARGET_DIR" ]]; then
        for log_file in "$TARGET_DIR"/*.log; do
            if [[ -f "$log_file" ]]; then
                log_files+=("$log_file")
            fi
        done
    fi

    if [[ ${#log_files[@]} -eq 0 ]]; then
        echo "::warning ::No log files found in $BASE_DIR using glob patterns"
        return 1
    fi

    echo "Found ${#log_files[@]} log file(s) using glob patterns"
    echo "Nodes:"

    for log_file in "${log_files[@]}"; do
        # Extract node name from log file path
        local name=$(basename "$log_file" .log)
        # local_to defaults to the global TO; extended below if we forward to Loki
        local_to=$TO

        # Copy log to target directory if not already there
        if [[ "$log_file" != "$TARGET_DIR/$name.log" ]]; then
            if ! cp "$log_file" "$TARGET_DIR/$name.log" 2>/dev/null; then
                echo "::warning ::Failed to copy log for $name"
                continue
            fi
        fi

        # Send logs to loki
        if [[ -d "$LOKI_DIR_FOR_NATIVE_LOGS" ]]; then
            if [[ -f "$TARGET_DIR/$name.log" ]]; then
                # prefix each line with "<namespace> <node>" so the forwarder can label it
                awk -v NS="$NS" -v NAME="$name" '{print NS" "NAME" " $0}' "$TARGET_DIR/$name.log" >> "$LOKI_DIR_FOR_NATIVE_LOGS/to-loki.log"
                # push the range end out again to give Loki time to ingest the new lines
                local_to=$(($(date +%s%3N) + 60000))
            fi
        fi
        echo -e "\t$name: $(make_url "$name" "$local_to")"
    done
    echo ""
}
|
||||
|
||||
# Collect node logs using the network description in zombie.json.
# Handles both zombienet v1 and SDK layouts (detected from the namespace name),
# and both the k8s provider (kubectl logs) and the native provider (log files on
# disk). Logs are copied into TARGET_DIR, optionally forwarded to Loki, and one
# Loki URL is printed per node.
#
# Args: $1 = BASE_DIR, $2 = TARGET_DIR, $3 = path to zombie.json.
# Globals set: NS, FROM (consumed by make_url).
process_logs_from_zombie_file() {
    local BASE_DIR="$1"
    local TARGET_DIR="$2"
    local ZOMBIE_JSON="$3"

    # Extract namespace (ns in sdk / namespace in v1)
    NS=$(jq -r '.ns // .namespace' "$ZOMBIE_JSON")
    # test start time in milliseconds
    FROM=$(jq -r '.start_time_ts' "$ZOMBIE_JSON")

    echo "Relay nodes:"

    # Default to the v1 query shapes; SDK namespaces look like "zombie-<hex>-..."
    JQ_QUERY_RELAY=$JQ_QUERY_RELAY_V1
    JQ_QUERY_PARA_NODES=$JQ_QUERY_PARA_NODES_V1
    if [[ $(echo "$NS" | grep -E "zombie-[A-Fa-f0-9]+-") ]]; then
        JQ_QUERY_RELAY=$JQ_QUERY_RELAY_SDK
        JQ_QUERY_PARA_NODES=$JQ_QUERY_PARA_NODES_SDK
    fi;

    jq -r $JQ_QUERY_RELAY "$ZOMBIE_JSON" | while read -r name; do
        [[ -z "$name" ]] && continue
        # local_to defaults to the global TO; extended below if we forward to Loki
        local_to=$TO
        if [[ "${ZOMBIE_PROVIDER:-}" == "k8s" ]]; then
            # Fetching logs from k8s
            if ! kubectl logs "$name" -c "$name" -n "$NS" > "$TARGET_DIR/$name.log" 2>&1; then
                echo "::warning ::Failed to fetch logs for $name"
            fi
        else
            # zombienet v1 dump the logs to the `/logs` directory
            if [[ ! -f "$TARGET_DIR/$name.log" ]]; then
                # `sdk` use this pattern to store the logs in native provider
                if [[ -f "$BASE_DIR/$name/$name.log" ]]; then
                    cp "$BASE_DIR/$name/$name.log" "$TARGET_DIR/$name.log"
                else
                    echo "::warning ::Log file not found: $BASE_DIR/$name/$name.log"
                    continue
                fi
            fi

            # send logs to loki
            if [[ -d "$LOKI_DIR_FOR_NATIVE_LOGS" && -f "$TARGET_DIR/$name.log" ]]; then
                awk -v NS="$NS" -v NAME="$name" '{print NS" "NAME" " $0}' "$TARGET_DIR/$name.log" >> "$LOKI_DIR_FOR_NATIVE_LOGS/to-loki.log"
                local_to=$(($(date +%s%3N) + 60000))
            fi
        fi
        echo -e "\t$name: $(make_url "$name" "$local_to")"
    done
    echo ""

    # Handle teyrchains grouped by paraId
    jq -r '.paras // .teyrchains | to_entries[] | "\(.key)"' "$ZOMBIE_JSON" | while read -r para_id; do
        echo "ParaId: $para_id"
        jq -r --arg pid "$para_id" "$JQ_QUERY_PARA_NODES" "$ZOMBIE_JSON" | while read -r name; do
            [[ -z "$name" ]] && continue
            local_to=$TO
            # NOTE(review): this branch duplicates the relay-node handling above;
            # keep the two in sync when changing either.
            if [[ "${ZOMBIE_PROVIDER:-}" == "k8s" ]]; then
                # Fetching logs from k8s
                if ! kubectl logs "$name" -c "$name" -n "$NS" > "$TARGET_DIR/$name.log" 2>&1; then
                    echo "::warning ::Failed to fetch logs for $name"
                fi
            else
                # zombienet v1 dump the logs to the `/logs` directory
                if [[ ! -f "$TARGET_DIR/$name.log" ]]; then
                    # `sdk` use this pattern to store the logs in native provider
                    if [[ -f "$BASE_DIR/$name/$name.log" ]]; then
                        cp "$BASE_DIR/$name/$name.log" "$TARGET_DIR/$name.log"
                    else
                        echo "::warning ::Log file not found: $BASE_DIR/$name/$name.log"
                        continue
                    fi
                fi

                # send logs to loki
                if [[ -d "$LOKI_DIR_FOR_NATIVE_LOGS" && -f "$TARGET_DIR/$name.log" ]]; then
                    awk -v NS="$NS" -v NAME="$name" '{print NS" "NAME" " $0}' "$TARGET_DIR/$name.log" >> "$LOKI_DIR_FOR_NATIVE_LOGS/to-loki.log"
                    local_to=$(($(date +%s%3N) + 60000))
                fi
            fi
            echo -e "\t$name: $(make_url "$name" "$local_to")"
        done
        echo ""
    done
}
|
||||
|
||||
# Main execution - Process all zombie-* directories (supports rstest with multiple tests per job)
# Newest directories first (ls -dt); a missing glob is tolerated (|| true).
BASE_DIRS=$(ls -dt /tmp/zombie-* 2>/dev/null || true)

if [[ -z "$BASE_DIRS" ]]; then
    echo "No zombie directories found in /tmp/zombie-*"
    exit 0
fi

for BASE_DIR in $BASE_DIRS; do
    echo "Processing directory: $BASE_DIR"

    # Make sure target directory exists
    TARGET_DIR="$BASE_DIR/logs"
    mkdir -p "$TARGET_DIR"
    ZOMBIE_JSON="$BASE_DIR/zombie.json"

    if [[ ! -f "$ZOMBIE_JSON" ]]; then
        echo "Zombie file $ZOMBIE_JSON not present, calling fallback"
        process_logs_from_fallback "$BASE_DIR" "$TARGET_DIR"
    else
        # we have a zombie.json file, let process it
        echo "Processing logs from zombie.json"
        process_logs_from_zombie_file "$BASE_DIR" "$TARGET_DIR" "$ZOMBIE_JSON"
    fi
    echo ""
done

# sleep for a minute to give alloy time to forward logs
sleep 60
|
||||
Executable
+16
@@ -0,0 +1,16 @@
|
||||
#!/usr/bin/env bash
set -e

# Build a Debian (.deb) package for a prebuilt workspace binary with cargo-deb.
#
# Usage: <script> <product> <version>
#   $1 PRODUCT - cargo package / binary name (e.g. pezkuwi)
#   $2 VERSION - version string embedded in the .deb
# Env:
#   PROFILE - cargo profile the binaries were built with (default: production)
PRODUCT=$1
VERSION=$2
PROFILE=${PROFILE:-production}

cargo install --version 2.7.0 cargo-deb --locked -q
echo "Using cargo-deb v$(cargo-deb --version)"
echo "Building a Debian package for '$PRODUCT' in '$PROFILE' profile"

# --no-build/--no-strip: binaries are expected to be prebuilt by a previous CI step.
cargo deb --profile $PROFILE --no-strip --no-build -p $PRODUCT --deb-version $VERSION

# FIX: the original wrote `$PRODUCT_*`, which bash parses as the (empty)
# variable "PRODUCT_" — the glob did not actually contain the product name and
# would match any *_amd64.deb in target/debian. ${PRODUCT} delimits correctly.
deb=target/debian/${PRODUCT}_*_amd64.deb

cp $deb target/production/
|
||||
+40
@@ -0,0 +1,40 @@
|
||||
#!/usr/bin/env bash

# This is used to build our binaries:
# - pezkuwi
# - pezkuwi-teyrchain
# - pezkuwi-omni-node
#
# set -e

# Args: $1 = binary name, $2 = cargo package (defaults to the binary name),
#       $3 = optional comma-separated cargo feature list.
BIN=$1
PACKAGE=${2:-$BIN}
# must be given as feature1,feature2,feature3...
FEATURES=$3
if [ -n "$FEATURES" ]; then
    FEATURES="--features ${FEATURES}"
fi

PROFILE=${PROFILE:-production}
ARTIFACTS=/artifacts/$BIN

echo "Artifacts will be copied into $ARTIFACTS"
mkdir -p "$ARTIFACTS"

git log --pretty=oneline -n 1
time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PACKAGE $FEATURES

echo "Artifact target: $ARTIFACTS"

cp ./target/$PROFILE/$BIN "$ARTIFACTS"
pushd "$ARTIFACTS" > /dev/null
sha256sum "$BIN" | tee "$BIN.sha256"
chmod a+x "$BIN"
# VERSION is the binary's own `--version` output (e.g. "pezkuwi 1.2.3-abcdef0-...")
VERSION="$($ARTIFACTS/$BIN --version)"
# extract the "<semver>-<short commit>" part from the version string
EXTRATAG="$(echo "${VERSION}" |
    sed -n -r 's/^'$BIN' ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p')"
# final tag: <version output>-<semver-commit>-<first 8 chars of the sha256 file>
EXTRATAG="${VERSION}-${EXTRATAG}-$(cut -c 1-8 $ARTIFACTS/$BIN.sha256)"

echo "$BIN version = ${VERSION} (EXTRATAG = ${EXTRATAG})"
echo -n ${VERSION} > "$ARTIFACTS/VERSION"
echo -n ${EXTRATAG} > "$ARTIFACTS/EXTRATAG"
|
||||
+42
@@ -0,0 +1,42 @@
|
||||
#!/usr/bin/env bash

# This is used to build our binaries:
# - pezkuwi
# - pezkuwi-teyrchain
# - pezkuwi-omni-node
# set -e

# Args: $1 = binary name, $2 = cargo package (defaults to the binary name),
#       $3 = optional comma-separated cargo feature list.
BIN=$1
PACKAGE=${2:-$BIN}

PROFILE=${PROFILE:-production}
# parity-macos runner needs a path where it can
# write, so make it relative to github workspace.
ARTIFACTS=$GITHUB_WORKSPACE/artifacts/$BIN
# release tag (v*) pointing at HEAD — used as the version label on macOS,
# unlike the linux variant which takes the binary's own --version output
VERSION=$(git tag -l --contains HEAD | grep -E "^v.*")
# must be given as feature1,feature2,feature3...
FEATURES=$3
if [ -n "$FEATURES" ]; then
    FEATURES="--features ${FEATURES}"
fi

echo "Artifacts will be copied into $ARTIFACTS"
mkdir -p "$ARTIFACTS"

git log --pretty=oneline -n 1
time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PACKAGE $FEATURES

echo "Artifact target: $ARTIFACTS"

cp ./target/$PROFILE/$BIN "$ARTIFACTS"
pushd "$ARTIFACTS" > /dev/null
sha256sum "$BIN" | tee "$BIN.sha256"

# extract the "<semver>-<short commit>" part of the binary's --version output
EXTRATAG="$($ARTIFACTS/$BIN --version |
    sed -n -r 's/^'$BIN' ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p')"

# final tag: <git tag>-<semver-commit>-<first 8 chars of the sha256 file>
EXTRATAG="${VERSION}-${EXTRATAG}-$(cut -c 1-8 $ARTIFACTS/$BIN.sha256)"

echo "$BIN version = ${VERSION} (EXTRATAG = ${EXTRATAG})"
echo -n ${VERSION} > "$ARTIFACTS/VERSION"
echo -n ${EXTRATAG} > "$ARTIFACTS/EXTRATAG"
|
||||
Executable
+68
@@ -0,0 +1,68 @@
|
||||
#!/usr/bin/env bash
set -e

# Build an RPM package for a prebuilt node binary and its PVF workers using fpm.
#
# Usage: <script> <product_name> <version>
# Env: PROFILE - cargo profile the binaries were built with (default: production)

# --- Configuration ---
PRODUCT=${1:?"Usage: $0 <product_name> <version>"}
VERSION=${2:?"Usage: $0 <product_name> <version>"}
PROFILE=${PROFILE:-production}
ARCH="x86_64"

SOURCE_DIR="target/${PROFILE}"
STAGING_DIR="/tmp/${PRODUCT}-staging"
DEST_DIR="target/production"

# --- Script Start ---
echo "📦 Starting RPM build for '$PRODUCT' version '$VERSION'..."

# 1. Clean up and create a fresh staging directory
echo "🔧 Setting up staging directory: ${STAGING_DIR}"
rm -rf "$STAGING_DIR"
mkdir -p "$STAGING_DIR/usr/bin"
mkdir -p "$STAGING_DIR/usr/lib/${PRODUCT}"
mkdir -p "$STAGING_DIR/usr/lib/systemd/system"
mkdir -p "$STAGING_DIR/etc/default"

# 2. Copy compiled binaries and assets into the staging directory
echo "📂 Copying application files..."
cp "${SOURCE_DIR}/${PRODUCT}" "${STAGING_DIR}/usr/bin/"
cp "${SOURCE_DIR}/${PRODUCT}-prepare-worker" "${STAGING_DIR}/usr/lib/${PRODUCT}/"
cp "${SOURCE_DIR}/${PRODUCT}-execute-worker" "${STAGING_DIR}/usr/lib/${PRODUCT}/"
# MODIFIED PATH: Prefixed with the subdirectory name
cp "pezkuwi/scripts/packaging/pezkuwi.service" "${STAGING_DIR}/usr/lib/systemd/system/"

# Create default config file
echo 'PEZKUWI_CLI_ARGS=""' > "$STAGING_DIR/etc/default/pezkuwi"

# 3. Use fpm to package the staging directory into an RPM
# fpm config file .fpm is located in the pezkuwi-sdk root directory
echo "🎁 Running fpm to create the RPM package..."
fpm \
    -s dir \
    -t rpm \
    -n "$PRODUCT" \
    -v "$VERSION" \
    -a "$ARCH" \
    --rpm-os linux \
    --description "Pezkuwi Node" \
    --license "GPL-3.0-only" \
    --url "https://pezkuwi.network/" \
    --depends systemd \
    --depends shadow-utils \
    --after-install "pezkuwi/scripts/packaging/rpm-maintainer-scripts/rpm-postinst.sh" \
    --before-remove "pezkuwi/scripts/packaging/rpm-maintainer-scripts/rpm-preun.sh" \
    --after-remove "pezkuwi/scripts/packaging/rpm-maintainer-scripts/rpm-postun.sh" \
    --config-files "/etc/default/pezkuwi" \
    -C "$STAGING_DIR" \
    .

# 4. Move the final RPM to the artifacts directory
# (fpm writes <name>-<version>-1.<arch>.rpm into the current directory)
echo "🚚 Moving RPM to '${DEST_DIR}'..."
mkdir -p "$DEST_DIR"
mv "${PRODUCT}-${VERSION}-1.${ARCH}.rpm" "$DEST_DIR/"

# 5. Clean up the staging directory
echo "🧹 Cleaning up temporary files..."
rm -rf "$STAGING_DIR"

echo "✅ RPM package build complete!"
ls -l "$DEST_DIR"
|
||||
@@ -0,0 +1,39 @@
|
||||
Origin: Parity
|
||||
Label: Parity
|
||||
Codename: release
|
||||
Architectures: amd64
|
||||
Components: main
|
||||
Description: Apt repository for software made by Parity Technologies Ltd.
|
||||
SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE
|
||||
|
||||
Origin: Parity
|
||||
Label: Parity Staging
|
||||
Codename: staging
|
||||
Architectures: amd64
|
||||
Components: main
|
||||
Description: Staging distribution for Parity Technologies Ltd. packages
|
||||
SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE
|
||||
|
||||
Origin: Parity
|
||||
Label: Parity stable2407
|
||||
Codename: stable2407
|
||||
Architectures: amd64
|
||||
Components: main
|
||||
Description: Apt repository for software made by Parity Technologies Ltd.
|
||||
SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE
|
||||
|
||||
Origin: Parity
|
||||
Label: Parity stable2409
|
||||
Codename: stable2409
|
||||
Architectures: amd64
|
||||
Components: main
|
||||
Description: Apt repository for software made by Parity Technologies Ltd.
|
||||
SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE
|
||||
|
||||
Origin: Parity
|
||||
Label: Parity stable2412
|
||||
Codename: stable2412
|
||||
Architectures: amd64
|
||||
Components: main
|
||||
Description: Apt repository for software made by Parity Technologies Ltd.
|
||||
SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE
|
||||
@@ -0,0 +1,112 @@
|
||||
#!/bin/bash
# pgpkms wrapper to make it compatible with RPM's GPG interface
# This script translates RPM's GPG arguments to pgpkms format
#
# Flow: parse the GPG-style argument list RPM passes, work out the input file
# (possibly stdin), output file, and armor mode, then exec pgpkms with the
# equivalent arguments.

# Debug: log all arguments to stderr
echo "pgpkms-gpg-wrapper called with args: $*" >&2

# Parse arguments to find the input file and options
input_file=""
output_file=""
detach_sign=false
armor=false
local_user=""
read_from_stdin=false

while [[ $# -gt 0 ]]; do
    case $1 in
        --detach-sign)
            detach_sign=true
            shift
            ;;
        --armor)
            armor=true
            shift
            ;;
        --local-user)
            local_user="$2"
            shift 2
            ;;
        -u)
            local_user="$2"
            shift 2
            ;;
        -sbo)
            # RPM uses -sbo which means: -s (sign), -b (detach), -o (output to file)
            detach_sign=true
            # The next argument should be the output file
            shift
            if [[ -n "$1" ]] && [[ "$1" != "--" ]]; then
                output_file="$1"
                shift
            fi
            ;;
        --no-verbose|--no-armor|--no-secmem-warning|--batch|--no-tty|--pinentry-mode|--passphrase-fd)
            # Skip these GPG-specific options
            shift
            ;;
        --)
            # End of options marker
            shift
            break
            ;;
        --*)
            # Skip other long options
            shift
            ;;
        -*)
            # Skip other short options
            shift
            ;;
        *)
            # This could be a file argument
            if [[ "$1" == "-" ]]; then
                read_from_stdin=true
            elif [[ -z "$input_file" ]] && [[ -f "$1" ]]; then
                input_file="$1"
            fi
            shift
            ;;
    esac
done

# Handle remaining arguments after --
while [[ $# -gt 0 ]]; do
    if [[ "$1" == "-" ]]; then
        read_from_stdin=true
    elif [[ -z "$input_file" ]] && [[ -f "$1" ]]; then
        input_file="$1"
    fi
    shift
done

echo "Parsed: input_file='$input_file', output_file='$output_file', read_from_stdin=$read_from_stdin, armor=$armor" >&2

# If we're supposed to read from stdin, we need to create a temp file
# (pgpkms signs files, not streams)
if [[ "$read_from_stdin" == "true" ]]; then
    temp_input=$(mktemp)
    cat > "$temp_input"
    input_file="$temp_input"
    echo "Created temp file for stdin: $input_file" >&2
fi

if [[ -z "$input_file" ]]; then
    echo "Error: No input file found" >&2
    exit 1
fi

echo "Signing file: $input_file" >&2

# Call pgpkms with the appropriate arguments
# NOTE: detach_sign and local_user are parsed but not forwarded — pgpkms
# presumably always detach-signs with its configured key.
pgpkms_args="sign --input $input_file"

if [[ -n "$output_file" ]]; then
    pgpkms_args="$pgpkms_args --output $output_file"
fi

# pgpkms emits ASCII-armored output by default; request binary unless --armor was given
if [[ "$armor" != "true" ]]; then
    pgpkms_args="$pgpkms_args --binary"
fi

echo "Running: /home/runner/.local/bin/pgpkms $pgpkms_args" >&2
exec /home/runner/.local/bin/pgpkms $pgpkms_args
|
||||
@@ -0,0 +1,206 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Set the new version by replacing the value of the constant given as pattern
# in the file.
#
# input: pattern, version, file
# output: none
set_version() {
    pattern=$1
    version=$2
    file=$3

    # the pattern is expected to capture the "name = " prefix in \1 so only
    # the quoted value is rewritten in place
    sed -i "s/$pattern/\1\"${version}\"/g" $file
    return 0
}
|
||||
|
||||
# Commit all tracked changes with the given message.
# The `|| true` keeps the calling script alive (no exit code 1) when there is
# nothing to commit.
#
# input: MESSAGE (any message which should be used for the commit)
# output: none
commit_with_message() {
    local message="$1"
    git commit -a -m "$message" || true
}
|
||||
|
||||
# Return the list of runtime lib.rs files, filtered.
# Keeps .rs files on a "runtime" path that declare `spec_version:` and drops
# test/template/starter/substrate crates.
#
# input: none
# output: list of filtered runtime file paths, one per line
get_filtered_runtimes_list() {
    grep_filters=("runtime.*" "test|template|starters|substrate")

    git grep spec_version: | grep .rs: | grep -e "${grep_filters[0]}" | grep "lib.rs" | grep -vE "${grep_filters[1]}" | cut -d: -f1
}
|
||||
|
||||
# Sets provided spec version in every given runtime file and commits the change.
# input: version (N_NNN_NNN form), followed by the list of runtime file paths
set_spec_versions() {
    NEW_VERSION=$1
    runtimes_list=(${@:2})

    printf "Setting spec_version to $NEW_VERSION\n"

    for f in ${runtimes_list[@]}; do
        printf " processing $f"
        # only matches the underscore-separated N_NNN_NNN spec_version format
        sed -ri "s/spec_version: [0-9]+_[0-9]+_[0-9]+,/spec_version: $NEW_VERSION,/" $f
    done

    commit_with_message "Bump spec_version to $NEW_VERSION"

    git_show_log 'spec_version'
}
|
||||
|
||||
# Displays formatted results of the git log command
# for the given pattern which needs to be found in logs
# input: pattern, count (optional, default is 10)
git_show_log() {
    PATTERN="$1"
    COUNT=${2:-10}
    git log --pretty=format:"%h %ad | %s%d [%an]" --graph --date=iso-strict | \
        head -n $COUNT | grep -iE "$PATTERN" --color=always -z
}
|
||||
|
||||
# Get a spec_version number from the crate version
#
# ## inputs
# - v1.12.0 or 1.12.0
#
# ## output:
# 1_012_000 or 1_012_001 if SUFFIX is set
function get_spec_version() {
    INPUT=$1
    # SUFFIX makes it possible to pin the last segment to a specific runtime
    # version; it can be preset as an environment variable (default 000)
    SUFFIX=${SUFFIX:-000}
    [[ $INPUT =~ .*([0-9]+\.[0-9]+\.[0-9]{1,2}).* ]]
    VERSION="${BASH_REMATCH[1]}"
    MATCH="${BASH_REMATCH[0]}"
    # FIX: quote the expansion — the unquoted `[ -z $MATCH ]` collapsed to
    # `[ -z ]` on no match (working only by accident) and would have broken on
    # any match containing whitespace.
    if [ -z "$MATCH" ]; then
        return 1
    else
        # 1.12.0 -> replace each "." with "_0", then swap the final segment
        # for the suffix: 1_012_00 -> 1_012_000
        SPEC_VERSION="$(sed -e "s/\./_0/g" -e "s/_[^_]*\$/_$SUFFIX/" <<< "$VERSION")"
        echo "$SPEC_VERSION"
        return 0
    fi
}
|
||||
|
||||
# Reorganize the prdoc files for the release: move prdoc/pr_*.prdoc into a
# per-release subdirectory and commit.
#
# input: VERSION (e.g. v1.0.0)
# output: none
reorder_prdocs() {
    VERSION="$1"

    printf "[+] ℹ️ Reordering prdocs:"

    VERSION=$(sed -E 's/^v([0-9]+\.[0-9]+\.[0-9]+).*$/\1/' <<< "$VERSION") # strip the leading 'v' (and any rc suffix) from the tag
    mkdir -p "prdoc/$VERSION"
    mv prdoc/pr_*.prdoc prdoc/$VERSION
    git add -A
    commit_with_message "Reordering prdocs for the release $VERSION"
}
|
||||
|
||||
# Bump the binary version of the pezkuwi-teyrchain binary with the
# new bumped version and commit changes.
#
# input: version e.g. 1.16.0, path to the Cargo.toml to edit
set_pezkuwi_teyrchain_binary_version() {
    bumped_version="$1"
    cargo_toml_file="$2"

    set_version "\(^version = \)\".*\"" $bumped_version $cargo_toml_file

    cargo update --workspace --offline # we need this to update Cargo.lock with the new versions as well

    MESSAGE="Bump versions in: ${cargo_toml_file}"
    commit_with_message "$MESSAGE"
    git_show_log "$MESSAGE"
}
|
||||
|
||||
|
||||
# Upload a product's release artifacts to the S3 release bucket and list the
# bucket contents before and after.
#
# input: product name, version, target platform triple
# output: none
upload_s3_release() {
    # NOTE(review): `alias` expansion inside a non-interactive bash function
    # normally requires `shopt -s expand_aliases` and takes effect only for
    # lines read afterwards — presumably the CI shell sourcing this file is
    # configured so `aws` resolves; confirm.
    alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws'

    product=$1
    version=$2
    target=$3

    echo "Working on product: $product "
    echo "Working on version: $version "
    echo "Working on platform: $target "

    # per-binary releases.parity.io/... base path
    URL_BASE=$(get_s3_url_base $product)

    echo "Current content, should be empty on new uploads:"
    aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize || true
    echo "Content to be uploaded:"
    artifacts="release-artifacts/$target/$product/"
    ls "$artifacts"
    aws s3 sync --acl public-read "$artifacts" "s3://${URL_BASE}/${version}/${target}"
    echo "Uploaded files:"
    aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize
    echo "✅ The release should be at https://${URL_BASE}/${version}/${target}"
}
|
||||
|
||||
# Upload runtimes artifacts to s3 release bucket
#
# input: version (stable release tag e.g. pezkuwi-stable2412 or pezkuwi-stable2412-rc1)
# output: none
upload_s3_runtimes_release_artifacts() {
    # NOTE(review): `alias` expansion inside a non-interactive bash function
    # normally requires `shopt -s expand_aliases`; confirm the CI shell config.
    alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws'

    version=$1

    echo "Working on version: $version "

    echo "Current content, should be empty on new uploads:"
    aws s3 ls "s3://releases.parity.io/pezkuwi/runtimes/${version}/" --recursive --human-readable --summarize || true
    echo "Content to be uploaded:"
    artifacts="artifacts/runtimes/"
    ls "$artifacts"
    aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/pezkuwi/runtimes/${version}/"
    echo "Uploaded files:"
    aws s3 ls "s3://releases.parity.io/pezkuwi/runtimes/${version}/" --recursive --human-readable --summarize
    echo "✅ The release should be at https://releases.parity.io/pezkuwi/runtimes/${version}"
}
|
||||
|
||||
|
||||
# Pass the name of the binary as input, it will
# return the s3 base url on stdout.
#
# For an unsupported binary the error goes to stderr and the function returns 1,
# so `$(get_s3_url_base bad)` yields an empty string rather than the error text.
function get_s3_url_base() {
    name=$1
    case $name in
        pezkuwi | pezkuwi-execute-worker | pezkuwi-prepare-worker )
            printf "releases.parity.io/pezkuwi"
            ;;

        pezkuwi-teyrchain)
            printf "releases.parity.io/pezkuwi-teyrchain"
            ;;

        pezkuwi-omni-node)
            printf "releases.parity.io/pezkuwi-omni-node"
            ;;

        chain-spec-builder)
            printf "releases.parity.io/chain-spec-builder"
            ;;

        frame-omni-bencher)
            printf "releases.parity.io/frame-omni-bencher"
            ;;
        substrate-node)
            printf "releases.parity.io/substrate-node"
            ;;
        eth-rpc)
            printf "releases.parity.io/eth-rpc"
            ;;
        subkey)
            printf "releases.parity.io/subkey"
            ;;
        *)
            # FIX: the original printed the error on stdout and called `exit 1`.
            # Inside $(...) the exit only killed the subshell, so callers
            # silently captured "UNSUPPORTED BINARY ..." as the URL. Report on
            # stderr and return non-zero instead.
            printf "UNSUPPORTED BINARY %s\n" "$name" >&2
            return 1
            ;;
    esac
}
|
||||
@@ -0,0 +1,4 @@
|
||||
%_signature gpg
|
||||
%_gpg_name 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE
|
||||
%__gpg /home/runner/work/pezkuwi-sdk/pezkuwi-sdk/.github/scripts/release/pgpkms-gpg-wrapper.sh
|
||||
%__gpgbin /home/runner/work/pezkuwi-sdk/pezkuwi-sdk/.github/scripts/release/pgpkms-gpg-wrapper.sh
|
||||
Executable
+85
@@ -0,0 +1,85 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# This script executes a given zombienet test for the `native` provider.
|
||||
# It is equivalent to running run-test-local-env-manager.sh for the `k8s` provider.
|
||||
|
||||
# Locate TEST_TO_RUN under OUTPUT_DIR and execute it with the zombienet
# `native` provider. Sets the global EXIT_STATUS to the test's exit code, or
# to 1 when no matching test file was found.
function run_test {
    cd "${OUTPUT_DIR}"
    # `head -1`: even if the name matches several files, run only the first
    for i in $(find ${OUTPUT_DIR} -name "${TEST_TO_RUN}"| head -1); do
        TEST_FOUND=1
        # in order to let native provider work properly we need
        # to unset ZOMBIENET_IMAGE, which controls 'inCI' internal flag.
        # ZOMBIENET_IMAGE not set && RUN_IN_CONTAINER=0 => inCI=false
        # Apparently inCI=true works properly only with k8s provider
        unset ZOMBIENET_IMAGE
        if [ -z "$ZOMBIE_BASE_DIR" ]; then
            ${ZOMBIE_COMMAND} -p native -c $CONCURRENCY test $i
        else
            # -d: use the caller-provided base dir for zombienet state
            ${ZOMBIE_COMMAND} -p native -c $CONCURRENCY -d $ZOMBIE_BASE_DIR -f test $i
        fi;
        EXIT_STATUS=$?
    done;
    # If the loop never ran, TEST_FOUND is unset; the empty value evaluates as
    # 0 in [[ ]] arithmetic context, so a missing test yields EXIT_STATUS=1.
    if [[ $TEST_FOUND -lt 1 ]]; then
        EXIT_STATUS=1
    fi;
}
|
||||
|
||||
# Create a fresh timestamp-named subdirectory under OUTPUT_DIR and point
# OUTPUT_DIR (global) at it, so each test run works in its own directory.
function create_isolated_dir {
    local ts
    ts=$(date +%s)
    ISOLATED="${OUTPUT_DIR}/${ts}"
    mkdir -p "${ISOLATED}"
    OUTPUT_DIR="${ISOLATED}"
}
|
||||
|
||||
# Copy the contents of LOCAL_DIR into the isolated OUTPUT_DIR, working from
# the script's own directory (both globals are set by the main section).
function copy_to_isolated {
    cd "${SCRIPT_PATH}"
    pwd
    cp -r "${LOCAL_DIR}"/* "${OUTPUT_DIR}"
}
|
||||
|
||||
# Delete the isolated working directory created by create_isolated_dir.
function rm_isolated_dir {
    echo "Removing ${OUTPUT_DIR}"
    rm -rf -- "${OUTPUT_DIR}"
}
|
||||
|
||||
# Print a timestamped log line: "<date> - <level> - <message>".
# A level of "DIE" is reported as ERROR and terminates the script with exit 1.
function log {
    local level="$1" message="$2"
    local stamp
    stamp=$(date "+%Y-%m-%d %H:%M:%S")
    if [[ "${level}" = "DIE" ]]; then
        echo -e "\n${stamp} - ERROR - ${message}"
        exit 1
    fi
    echo -e "\n${stamp} - ${level} - ${message}"
}
|
||||
|
||||
set -x

SCRIPT_NAME="$0"
SCRIPT_PATH=$(dirname "$0") # relative
SCRIPT_PATH=$(cd "${SCRIPT_PATH}" && pwd) # absolutized and normalized

ZOMBIE_COMMAND=zombie

EXIT_STATUS=0

# args
LOCAL_DIR="$1"       # directory with the test specs to copy into isolation
CONCURRENCY="$2"     # forwarded to `zombie -c`
TEST_TO_RUN="$3"     # file name (find pattern) of the test to execute
ZOMBIE_BASE_DIR="$4" # optional: zombienet state base dir, forwarded via -d

cd "${SCRIPT_PATH}"

OUTPUT_DIR="${SCRIPT_PATH}"

# run the test inside an isolated copy of LOCAL_DIR, then clean up
create_isolated_dir
copy_to_isolated
run_test
rm_isolated_dir

log INFO "Exit status is ${EXIT_STATUS}"
exit "${EXIT_STATUS}"
|
||||
@@ -0,0 +1,79 @@
|
||||
"""Rebuild the wishlist leaderboard embedded in a tracking GitHub issue.

Every comment of the form "I wish for: <issue/PR url>" counts as a wish.
A wish's score is the number of unique users who authored or upvoted the
wish comment, or upvoted the wished-for issue itself. The leaderboard table
between "| Feature Request |" and "> Last updated:" in the issue body is
replaced in place.

Env: GH_TOKEN, WISHLIST_REPOSITORY ("org/repo"), WISHLIST_ISSUE_NUMBER.
"""

from github import Github
import re
import os
from datetime import date

g = Github(os.getenv("GH_TOKEN"))

# Regex pattern to match wish format:
wish_pattern = re.compile(
    r"I wish for:? (https://github\.com/([a-zA-Z0-9_.-]+)/([a-zA-Z0-9_.-]+)/(issues|pull)/(\d+))"
)

wishlist_issue = g.get_repo(os.getenv("WISHLIST_REPOSITORY")).get_issue(
    int(os.getenv("WISHLIST_ISSUE_NUMBER"))
)
new_leaderboard = (
    "| Feature Request | Summary | Votes | Status |\n| --- | --- | --- | --- |\n"
)
# (url, org, repo, issue_id) -> list of supporter user ids (deduplicated later)
wishes = {}
# url -> [issue title, open/closed status label]
issue_details = {}

for comment in wishlist_issue.get_comments():
    # in the comment body, if there is a string `#(\d)`, replace it with
    # https://github.com/pezkuwichain/pezkuwi-sdk/issues/(number)
    updated_body = re.sub(
        r"#(\d+)", r"https://github.com/pezkuwichain/pezkuwi-sdk/issues/\1", comment.body
    )

    matches = wish_pattern.findall(updated_body)
    for match in matches:
        url, org, repo_name, _, issue_id = match
        issue_key = (url, org, repo_name, issue_id)
        if issue_key not in wishes:
            wishes[issue_key] = []

        # Get the author and upvoters of the wish comment.
        wishes[issue_key].append(comment.user.id)
        wishes[issue_key].extend(
            [
                reaction.user.id
                for reaction in comment.get_reactions()
                if reaction.content in ["+1", "heart", "rocket"]
            ]
        )

        # Get upvoters of the desired issue.
        desired_issue = g.get_repo(f"{org}/{repo_name}").get_issue(int(issue_id))
        wishes[issue_key].extend(
            [
                reaction.user.id
                for reaction in desired_issue.get_reactions()
                if reaction.content in ["+1", "heart", "rocket"]
            ]
        )
        issue_details[url] = [
            desired_issue.title,
            "👾 Open" if desired_issue.state == "open" else "✅Closed",
        ]

# Count unique wishes - the author of the wish, upvoters of the wish, and upvoters of the desired issue.
for key in wishes:
    # idiom fix: len(set(...)) — no intermediate list() needed
    wishes[key] = len(set(wishes[key]))

# Sort wishes by count and add to the markdown table
sorted_wishes = sorted(wishes.items(), key=lambda x: x[1], reverse=True)
for (url, _, _, _), count in sorted_wishes:
    # FIX: the fallback must be a two-element sequence. The previous default
    # (the bare string "No summary available") raised ValueError when unpacked
    # into [summary, status].
    summary, status = issue_details.get(url, ["No summary available", "Unknown"])
    new_leaderboard += f"| {url} | {summary} | {count} | {status} |\n"
new_leaderboard += f"\n> Last updated: {date.today().strftime('%Y-%m-%d')}\n"
print(new_leaderboard)

new_content = re.sub(
    r"(\| Feature Request \|)(.*?)(> Last updated:)(.*?\n)",
    new_leaderboard,
    wishlist_issue.body,
    flags=re.DOTALL,
)

wishlist_issue.edit(body=new_content)
|
||||
Reference in New Issue
Block a user