feat: Add rebrand CI/CD workflows to main branch
- Add 72 rebrand workflow files (polkadot→pezkuwi, substrate→bizinikiwi, cumulus→pezcumulus) - Add GitHub actions, issue templates, and configs - Remove unnecessary workflows (fork-sync, gitspiegel, upstream-tracker, sync-templates, backport) - Rename zombienet test files to match new naming convention
This commit is contained in:
@@ -0,0 +1,62 @@
|
||||
# Command Bot Documentation
|
||||
|
||||
The command bot allows contributors to perform self-service actions on PRs using comment commands.
|
||||
|
||||
## Available Commands
|
||||
|
||||
### Label Command (Self-service)
|
||||
|
||||
Add labels to your PR without requiring maintainer intervention:
|
||||
|
||||
```bash
|
||||
/cmd label T1-FRAME # Add single label
|
||||
/cmd label T1-FRAME R0-no-crate-publish-required # Add multiple labels
|
||||
/cmd label T1-FRAME A2-substantial D3-involved # Add multiple labels
|
||||
```
|
||||
|
||||
**Available Labels:**
|
||||
The bot dynamically fetches all current labels from the repository, ensuring it's always up-to-date. For label meanings and descriptions, see the [official label documentation](https://docs.pezkuwichain.io/labels/doc_pezkuwi-sdk.html).
|
||||
|
||||
**Features**:
|
||||
- **Auto-Correction**: Automatically fixes high-confidence typos (e.g., `T1-FRAM` → `T1-FRAME`)
|
||||
- **Case Fixing**: Handles case variations (e.g., `I2-Bug` → `I2-bug`)
|
||||
- **Smart Suggestions**: For ambiguous inputs, provides multiple options to choose from
|
||||
|
||||
### Other Commands
|
||||
|
||||
```bash
|
||||
/cmd fmt # Format code (cargo +nightly fmt and taplo)
|
||||
/cmd prdoc # Generate PR documentation
|
||||
/cmd bench # Run benchmarks
|
||||
/cmd update-ui # Update UI tests
|
||||
/cmd --help # Show help for all commands
|
||||
```
|
||||
|
||||
### Common Flags
|
||||
|
||||
- `--quiet`: Don't post start/end messages in PR
|
||||
- `--clean`: Clean up previous bot comments
|
||||
- `--image <image>`: Override docker image
|
||||
|
||||
## How It Works
|
||||
|
||||
1. **Command Detection**: The bot listens for comments starting with `/cmd` on PRs
|
||||
2. **Permission Check**: Verifies if the user is an organization member
|
||||
3. **Command Execution**: Runs the specified command in a containerized environment
|
||||
4. **Result Handling**:
|
||||
- For label commands: Applies labels via GitHub API
|
||||
- For other commands: Commits changes back to the PR branch
|
||||
5. **Feedback**: Posts success/failure messages in the PR
|
||||
|
||||
## Security
|
||||
|
||||
- Organization member check prevents unauthorized usage
|
||||
- Commands from non-members run using bot scripts from master branch
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If a command fails:
|
||||
1. Check the GitHub Actions logs linked in the bot's comment
|
||||
2. Verify the command syntax matches the examples
|
||||
3. Ensure you have permission to perform the action
|
||||
4. For label commands, verify the label names are in the allowed list
|
||||
@@ -0,0 +1,26 @@
|
||||
import argparse
|
||||
|
||||
"""
|
||||
|
||||
Custom help action for argparse, it prints the help message for the main parser and all subparsers.
|
||||
|
||||
"""
|
||||
|
||||
|
||||
class _HelpAction(argparse._HelpAction):
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
parser.print_help()
|
||||
|
||||
# retrieve subparsers from parser
|
||||
subparsers_actions = [
|
||||
action for action in parser._actions
|
||||
if isinstance(action, argparse._SubParsersAction)]
|
||||
# there will probably only be one subparser_action,
|
||||
# but better save than sorry
|
||||
for subparsers_action in subparsers_actions:
|
||||
# get all subparsers and print help
|
||||
for choice, subparser in subparsers_action.choices.items():
|
||||
print("\n### Command '{}'".format(choice))
|
||||
print(subparser.format_help())
|
||||
|
||||
parser.exit()
|
||||
Executable
+565
@@ -0,0 +1,565 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import argparse
|
||||
import _help
|
||||
import importlib.util
|
||||
import re
|
||||
import urllib.request
|
||||
import urllib.parse
|
||||
import difflib
|
||||
|
||||
_HelpAction = _help._HelpAction
|
||||
|
||||
# Load the runtime configuration matrix that drives the bench subcommand.
# Use a context manager so the file handle is closed (the original leaked it).
with open('.github/workflows/runtimes-matrix.json', 'r') as f:
    runtimesMatrix = json.load(f)

# Names of all configured runtimes; used as choices/default for --runtime.
runtimeNames = list(map(lambda x: x['name'], runtimesMatrix))

# Flags shared by every subcommand parser.
common_args = {
    '--quiet': {"action": "store_true", "help": "Won't print start/end/failed messages in PR"},
    '--clean': {"action": "store_true", "help": "Clean up the previous bot's & author's comments in PR"},
    '--image': {"help": "Override docker image '--image docker.io/paritytech/ci-unified:latest'"},
}
|
||||
|
||||
def print_and_log(message, output_file='/tmp/cmd/command_output.log'):
    """Echo *message* to stdout and append it (newline-terminated) to the
    bot's command log file."""
    print(message)
    with open(output_file, 'a') as log:
        log.write(f"{message}\n")
|
||||
|
||||
def setup_logging():
    """Create the /tmp/cmd log directory and truncate the command log file.

    Fixes two issues in the original: the exists/makedirs check was racy
    (now `exist_ok=True`), and the truncating `open()` leaked its handle
    (now closed via a context manager).
    """
    os.makedirs('/tmp/cmd', exist_ok=True)
    # Open in 'w' mode purely to create/truncate the file, then close it.
    with open('/tmp/cmd/command_output.log', 'w'):
        pass
|
||||
|
||||
def fetch_repo_labels():
    """Fetch current labels from the GitHub repository.

    Returns:
        A list of label name strings on success, or None on any failure
        (callers treat None as "labels unavailable").

    NOTE(review): only the first page is requested (per_page=100, no
    pagination loop) — repositories with more than 100 labels would be
    truncated; confirm whether that matters here.
    """
    try:
        # Use GitHub API to get current labels
        repo_owner = os.environ.get('GITHUB_REPOSITORY_OWNER', 'pezkuwichain')
        repo_name = os.environ.get('GITHUB_REPOSITORY', 'pezkuwichain/pezkuwi-sdk').split('/')[-1]

        api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/labels?per_page=100"

        # Add GitHub token if available for higher rate limits
        headers = {'User-Agent': 'pezkuwi-sdk-cmd-bot'}
        github_token = os.environ.get('GITHUB_TOKEN')
        if github_token:
            headers['Authorization'] = f'token {github_token}'

        req = urllib.request.Request(api_url, headers=headers)

        with urllib.request.urlopen(req) as response:
            if response.getcode() == 200:
                labels_data = json.loads(response.read().decode())
                label_names = [label['name'] for label in labels_data]
                print_and_log(f"Fetched {len(label_names)} labels from repository")
                return label_names
            else:
                # NOTE(review): urlopen raises HTTPError for non-2xx statuses,
                # so this branch is normally unreachable; the HTTPError is
                # handled by the broad except below instead.
                print_and_log(f"Failed to fetch labels: HTTP {response.getcode()}")
                return None
    except Exception as e:
        # Deliberate best-effort: any failure (network, JSON, HTTP error)
        # degrades to None rather than crashing the bot.
        print_and_log(f"Error fetching labels from repository: {e}")
        return None
|
||||
|
||||
|
||||
def check_pr_status(pr_number):
    """Check if PR is merged or in merge queue.

    Args:
        pr_number: PR number used to build the GitHub API URL.

    Returns:
        True only when the PR is open, not merged, and not scheduled for
        auto-merge. Returns False on any error, missing token, or
        non-open state — i.e. this function fails closed so labeling is
        blocked whenever status cannot be verified.
    """
    try:
        # Get GitHub token from environment
        github_token = os.environ.get('GITHUB_TOKEN')
        if not github_token:
            print_and_log("Error: GITHUB_TOKEN not set, cannot verify PR status")
            return False  # Prevent labeling if we can't check status

        repo_owner = os.environ.get('GITHUB_REPOSITORY_OWNER', 'pezkuwichain')
        repo_name = os.environ.get('GITHUB_REPOSITORY', 'pezkuwichain/pezkuwi-sdk').split('/')[-1]
        api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/pulls/{pr_number}"

        headers = {
            'User-Agent': 'pezkuwi-sdk-cmd-bot',
            'Authorization': f'token {github_token}',
            'Accept': 'application/vnd.github.v3+json'
        }

        req = urllib.request.Request(api_url, headers=headers)

        with urllib.request.urlopen(req) as response:
            if response.getcode() == 200:
                data = json.loads(response.read().decode())

                # Check if PR is merged
                if data.get('merged', False):
                    return False

                # Check if PR is closed
                if data.get('state') == 'closed':
                    return False

                # Check if PR is in merge queue (auto_merge enabled)
                if data.get('auto_merge') is not None:
                    return False

                return True  # PR is open and not in merge queue
            else:
                # NOTE(review): urlopen raises HTTPError for non-2xx
                # responses, so this branch is normally unreachable; the
                # broad except below handles those cases.
                print_and_log(f"Failed to fetch PR status: HTTP {response.getcode()}")
                return False  # Prevent labeling if we can't check status
    except Exception as e:
        print_and_log(f"Error checking PR status: {e}")
        return False  # Prevent labeling if we can't check status
|
||||
|
||||
|
||||
def find_closest_labels(invalid_label, valid_labels, max_suggestions=3, cutoff=0.6):
    """Return up to *max_suggestions* labels from *valid_labels* that are
    fuzzily similar to *invalid_label* (difflib ratio >= *cutoff*),
    best match first."""
    return difflib.get_close_matches(
        invalid_label, valid_labels, n=max_suggestions, cutoff=cutoff
    )
|
||||
|
||||
def auto_correct_labels(invalid_labels, valid_labels, auto_correct_threshold=0.8):
    """Split *invalid_labels* into confident corrections and human-facing suggestions.

    Returns a tuple ``(corrections, suggestions)``: corrections is a list of
    ``(original, corrected)`` pairs applied automatically; suggestions is a
    list of message strings for labels needing manual selection.
    """
    corrections = []
    suggestions = []

    for candidate in invalid_labels:
        # Best single fuzzy match (same as find_closest_labels with n=1).
        best = difflib.get_close_matches(candidate, valid_labels, n=1, cutoff=0.6)

        if best:
            top = best[0]
            score = difflib.SequenceMatcher(None, candidate.lower(), top.lower()).ratio()
            if score >= auto_correct_threshold:
                # High confidence: correct silently.
                corrections.append((candidate, top))
                continue
            # Lower confidence: offer up to three alternatives instead.
            alternatives = difflib.get_close_matches(candidate, valid_labels, n=3, cutoff=0.6)
            if alternatives:
                labels_str = ', '.join(f"'{label}'" for label in alternatives)
                suggestions.append(f"'{candidate}' → did you mean: {labels_str}?")
            else:
                suggestions.append(f"'{candidate}' → no close matches found")
            continue

        # No fuzzy match at all — fall back to the 'X9-' style prefix.
        prefix_match = re.match(r'^([A-Z]\d+)-', candidate)
        if not prefix_match:
            suggestions.append(f"'{candidate}' → invalid format (expected format: 'T1-FRAME', 'I2-bug', etc.)")
            continue

        prefix = prefix_match.group(1)
        prefix_labels = [label for label in valid_labels if label.startswith(prefix + '-')]
        if not prefix_labels:
            suggestions.append(f"'{candidate}' → no labels found with prefix '{prefix}-'")
        elif len(prefix_labels) == 1:
            # A unique label under this prefix is treated as a correction.
            corrections.append((candidate, prefix_labels[0]))
        else:
            suggestions.append(f"'{candidate}' → try labels starting with '{prefix}-': {', '.join(prefix_labels[:3])}")

    return corrections, suggestions
|
||||
|
||||
# Root parser for all `/cmd ...` invocations. Built-in -h/--help is disabled
# so the custom _HelpAction (which also prints every subcommand's help) can
# own the --help flag.
parser = argparse.ArgumentParser(prog="/cmd ", description='A command runner for pezkuwi-sdk repo', add_help=False)
parser.add_argument('--help', action=_HelpAction, help='help for help if you need some help')  # help for help
# Attach the shared --quiet/--clean/--image flags to the root parser.
for arg, config in common_args.items():
    parser.add_argument(arg, **config)

# Each subcommand registers itself here; the chosen name lands in args.command.
subparsers = parser.add_subparsers(help='a command to run', dest='command')

# Prepare /tmp/cmd/command_output.log before any command runs.
setup_logging()
|
||||
|
||||
"""
|
||||
BENCH
|
||||
"""
|
||||
|
||||
bench_example = '''**Examples**:
|
||||
Runs all benchmarks
|
||||
%(prog)s
|
||||
|
||||
Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it to output nothing to PR but reactions
|
||||
%(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet
|
||||
|
||||
Runs bench for all pallets for zagros runtime and fails fast on first failed benchmark
|
||||
%(prog)s --runtime zagros --fail-fast
|
||||
|
||||
Does not output anything and cleans up the previous bot's & author command triggering comments in PR
|
||||
%(prog)s --runtime zagros pezkuwichain --pallet pallet_balances pallet_multisig --quiet --clean
|
||||
'''
|
||||
|
||||
parser_bench = subparsers.add_parser('bench', aliases=['bench-omni'], help='Runs benchmarks (frame omni bencher)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)
|
||||
|
||||
for arg, config in common_args.items():
|
||||
parser_bench.add_argument(arg, **config)
|
||||
|
||||
parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames)
|
||||
parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
|
||||
parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')
|
||||
|
||||
|
||||
"""
|
||||
FMT
|
||||
"""
|
||||
parser_fmt = subparsers.add_parser('fmt', help='Formats code (cargo +nightly-VERSION fmt) and configs (taplo format)')
|
||||
for arg, config in common_args.items():
|
||||
parser_fmt.add_argument(arg, **config)
|
||||
|
||||
"""
|
||||
Update UI
|
||||
"""
|
||||
parser_ui = subparsers.add_parser('update-ui', help='Updates UI tests')
|
||||
for arg, config in common_args.items():
|
||||
parser_ui.add_argument(arg, **config)
|
||||
|
||||
"""
|
||||
PRDOC
|
||||
"""
|
||||
# Import generate-prdoc.py dynamically
|
||||
spec = importlib.util.spec_from_file_location("generate_prdoc", ".github/scripts/generate-prdoc.py")
|
||||
generate_prdoc = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(generate_prdoc)
|
||||
|
||||
parser_prdoc = subparsers.add_parser('prdoc', help='Generates PR documentation')
|
||||
generate_prdoc.setup_parser(parser_prdoc, pr_required=False)
|
||||
|
||||
"""
|
||||
LABEL
|
||||
"""
|
||||
# Fetch current labels from repository
|
||||
def get_allowed_labels():
    """Return the repository's current label names.

    Raises:
        RuntimeError: when the labels cannot be fetched from the GitHub API.
    """
    labels = fetch_repo_labels()
    if labels is None:
        # Hard failure: labeling against a stale or missing list would be wrong.
        raise RuntimeError("Failed to fetch labels from repository. Please check your connection and try again.")
    return labels
|
||||
|
||||
def validate_and_auto_correct_labels(input_labels, valid_labels):
    """Validate labels and auto-correct when confidence is high.

    Args:
        input_labels: labels as typed by the user.
        valid_labels: authoritative list of repository labels.

    Returns:
        ``(final_labels, correction_messages)`` — the accepted/corrected
        labels plus human-readable notes for each auto-correction.

    Raises:
        ValueError: with a single combined message covering every label
        that could not be validated or auto-corrected.
    """
    final_labels = []
    correction_messages = []
    all_suggestions = []
    no_match_labels = []

    # Process all labels first to collect all issues
    for label in input_labels:
        if label in valid_labels:
            final_labels.append(label)
        else:
            # Invalid label - try auto-correction
            corrections, suggestions = auto_correct_labels([label], valid_labels)

            if corrections:
                # Auto-correct with high confidence
                original, corrected = corrections[0]
                final_labels.append(corrected)
                # Recompute the similarity purely for the user-facing message.
                similarity = difflib.SequenceMatcher(None, original.lower(), corrected.lower()).ratio()
                correction_messages.append(f"Auto-corrected '{original}' → '{corrected}' (similarity: {similarity:.2f})")
            elif suggestions:
                # Low confidence - collect for batch error
                all_suggestions.extend(suggestions)
            else:
                # No suggestions at all
                no_match_labels.append(label)

    # If there are any labels that couldn't be auto-corrected, show all at once
    if all_suggestions or no_match_labels:
        error_parts = []

        if all_suggestions:
            error_parts.append("Labels requiring manual selection:")
            for suggestion in all_suggestions:
                error_parts.append(f" • {suggestion}")

        if no_match_labels:
            if all_suggestions:
                error_parts.append("")  # Empty line for separation
            error_parts.append("Labels with no close matches:")
            for label in no_match_labels:
                error_parts.append(f" • '{label}' → no valid suggestions available")

        error_parts.append("")
        error_parts.append("For all available labels, see: https://docs.pezkuwichain.io/labels/doc_pezkuwi-sdk.html")

        error_msg = "\n".join(error_parts)
        raise ValueError(error_msg)

    return final_labels, correction_messages
|
||||
|
||||
# Epilog shown by `/cmd label --help`; %(prog)s is expanded by argparse.
label_example = '''**Examples**:
Add single label
%(prog)s T1-FRAME

Add multiple labels
%(prog)s T1-FRAME R0-no-crate-publish-required

Add multiple labels
%(prog)s T1-FRAME A2-substantial D3-involved

Labels are fetched dynamically from the repository.
Typos are auto-corrected when confidence is high (>80% similarity).
For label meanings, see: https://docs.pezkuwichain.io/labels/doc_pezkuwi-sdk.html
'''

parser_label = subparsers.add_parser('label', help='Add labels to PR (self-service for contributors)', epilog=label_example, formatter_class=argparse.RawDescriptionHelpFormatter)
for arg, config in common_args.items():
    parser_label.add_argument(arg, **config)

# One or more label names; validation/auto-correction happens in main().
parser_label.add_argument('labels', nargs='+', help='Labels to add to the PR (auto-corrects typos)')
|
||||
|
||||
def main():
    """Entry point: parse `/cmd ...` arguments and dispatch to the selected
    subcommand (bench, fmt, update-ui, prdoc, label).

    Shell commands are executed via os.system/os.popen; non-zero statuses are
    either fatal (--fail-fast / setup failures) or collected and reported at
    the end. Exits with status 1 on failure.
    """
    global args, unknown, runtimesMatrix
    args, unknown = parser.parse_known_args()

    print(f'args: {args}')

    if args.command == 'bench' or args.command == 'bench-omni':
        runtime_pallets_map = {}
        failed_benchmarks = {}
        successful_benchmarks = {}

        profile = "production"

        print(f'Provided runtimes: {args.runtime}')
        # convert to mapped dict
        runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix))
        runtimesMatrix = {x['name']: x for x in runtimesMatrix}
        print(f'Filtered out runtimes: {runtimesMatrix}')

        # Install the bencher binary once up front; everything below needs it.
        compile_bencher = os.system(f"cargo install -q --path substrate/utils/frame/omni-bencher --locked --profile {profile}")
        if compile_bencher != 0:
            print_and_log('❌ Failed to compile frame-omni-bencher')
            sys.exit(1)

        # loop over remaining runtimes to collect available pallets
        for runtime in runtimesMatrix.values():
            build_command = f"forklift cargo build -q -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}"
            print(f'-- building "{runtime["name"]}" with `{build_command}`')
            build_status = os.system(build_command)
            if build_status != 0:
                print_and_log(f'❌ Failed to build {runtime["name"]}')
                if args.fail_fast:
                    sys.exit(1)
                else:
                    continue

            # Ask the bencher to list benchmarkable pallets in the built wasm.
            print(f'-- listing pallets for benchmark for {runtime["name"]}')
            wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
            list_command = f"frame-omni-bencher v1 benchmark pallet " \
                           f"--no-csv-header " \
                           f"--no-storage-info " \
                           f"--no-min-squares " \
                           f"--no-median-slopes " \
                           f"--all " \
                           f"--list " \
                           f"--runtime={wasm_file} " \
                           f"{runtime['bench_flags']}"
            print(f'-- running: {list_command}')
            output = os.popen(list_command).read()
            raw_pallets = output.strip().split('\n')

            # First CSV column of each line is the pallet name; dedupe via set.
            all_pallets = set()
            for pallet in raw_pallets:
                if pallet:
                    all_pallets.add(pallet.split(',')[0].strip())

            pallets = list(all_pallets)
            print(f'Pallets in {runtime["name"]}: {pallets}')
            runtime_pallets_map[runtime['name']] = pallets

        print(f'\n')

        # filter out only the specified pallets from collected runtimes/pallets
        if args.pallet:
            print(f'Pallets: {args.pallet}')
            new_pallets_map = {}
            # keep only specified pallets if they exist in the runtime
            for runtime in runtime_pallets_map:
                if set(args.pallet).issubset(set(runtime_pallets_map[runtime])):
                    new_pallets_map[runtime] = args.pallet

            runtime_pallets_map = new_pallets_map

            print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n')

        if not runtime_pallets_map:
            if args.pallet and not args.runtime:
                print(f"No pallets {args.pallet} found in any runtime")
            elif args.runtime and not args.pallet:
                print(f"{args.runtime} runtime does not have any pallets")
            elif args.runtime and args.pallet:
                print(f"No pallets {args.pallet} found in {args.runtime}")
            else:
                print('No runtimes found')
            sys.exit(1)

        # Run the actual benchmarks, one pallet at a time per runtime.
        for runtime in runtime_pallets_map:
            for pallet in runtime_pallets_map[runtime]:
                config = runtimesMatrix[runtime]
                header_path = os.path.abspath(config['header'])
                template = None

                print(f'-- config: {config}')
                if runtime == 'dev':
                    # to support sub-modules (https://github.com/paritytech/command-bot/issues/275)
                    search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
                    print(f'-- running: {search_manifest_path}')
                    manifest_path = os.popen(search_manifest_path).read()
                    if not manifest_path:
                        print(f'-- pallet {pallet} not found in dev runtime')
                        if args.fail_fast:
                            print_and_log(f'Error: {pallet} not found in dev runtime')
                            sys.exit(1)
                    package_dir = os.path.dirname(manifest_path)
                    print(f'-- package_dir: {package_dir}')
                    print(f'-- manifest_path: {manifest_path}')
                    output_path = os.path.join(package_dir, "src", "weights.rs")
                    # TODO: we can remove once all pallets in dev runtime are migrated to polkadot-sdk-frame
                    try:
                        uses_polkadot_sdk_frame = "true" in os.popen(f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .dependencies | any(.name == \"polkadot-sdk-frame\")'").read()
                        print(f'uses_polkadot_sdk_frame: {uses_polkadot_sdk_frame}')
                    # Empty output from the previous os.popen command
                    except StopIteration:
                        print(f'Error: {pallet} not found in dev runtime')
                        uses_polkadot_sdk_frame = False
                    template = config['template']
                    # Pallets on polkadot-sdk-frame get the umbrella weight template.
                    if uses_polkadot_sdk_frame and re.match(r"frame-(:?umbrella-)?weight-template\.hbs", os.path.normpath(template).split(os.path.sep)[-1]):
                        template = "substrate/.maintain/frame-umbrella-weight-template.hbs"
                    print(f'template: {template}')
                else:
                    default_path = f"./{config['path']}/src/weights"
                    xcm_path = f"./{config['path']}/src/weights/xcm"
                    output_path = default_path
                    if pallet.startswith("pallet_xcm_benchmarks"):
                        template = config['template']
                        output_path = xcm_path

                print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
                cmd = f"frame-omni-bencher v1 benchmark pallet " \
                      f"--extrinsic=* " \
                      f"--runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm " \
                      f"--pallet={pallet} " \
                      f"--header={header_path} " \
                      f"--output={output_path} " \
                      f"--wasm-execution=compiled " \
                      f"--steps=50 " \
                      f"--repeat=20 " \
                      f"--heap-pages=4096 " \
                      f"{f'--template={template} ' if template else ''}" \
                      f"--no-storage-info --no-min-squares --no-median-slopes " \
                      f"{config['bench_flags']}"
                print(f'-- Running: {cmd} \n')
                status = os.system(cmd)

                if status != 0 and args.fail_fast:
                    print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}')
                    sys.exit(1)

                # Otherwise collect failed benchmarks and print them at the end
                # push failed pallets to failed_benchmarks
                if status != 0:
                    failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
                else:
                    successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]

        if failed_benchmarks:
            print_and_log('❌ Failed benchmarks of runtimes/pallets:')
            for runtime, pallets in failed_benchmarks.items():
                print_and_log(f'-- {runtime}: {pallets}')

        if successful_benchmarks:
            print_and_log('✅ Successful benchmarks of runtimes/pallets:')
            for runtime, pallets in successful_benchmarks.items():
                print_and_log(f'-- {runtime}: {pallets}')

    elif args.command == 'fmt':
        command = f"cargo +nightly fmt"
        print(f'Formatting with `{command}`')
        nightly_status = os.system(f'{command}')
        taplo_status = os.system('taplo format --config .config/taplo.toml')

        if (nightly_status != 0 or taplo_status != 0):
            print_and_log('❌ Failed to format code')
            sys.exit(1)

    elif args.command == 'update-ui':
        command = 'sh ./scripts/update-ui-tests.sh'
        print(f'Updating ui with `{command}`')
        status = os.system(f'{command}')

        if status != 0:
            print_and_log('❌ Failed to update ui')
            sys.exit(1)

    elif args.command == 'prdoc':
        # Call the main function from ./github/scripts/generate-prdoc.py module
        exit_code = generate_prdoc.main(args)
        if exit_code != 0:
            print_and_log('❌ Failed to generate prdoc')
            sys.exit(exit_code)

    elif args.command == 'label':
        # The actual labeling is handled by the GitHub Action workflow
        # This script validates and auto-corrects labels

        try:
            # Check if PR is still open and not merged/in merge queue
            pr_number = os.environ.get('PR_NUM')
            if pr_number:
                if not check_pr_status(pr_number):
                    raise ValueError("Cannot modify labels on merged PRs or PRs in merge queue")

            # Check if user has permission to modify labels
            is_org_member = os.environ.get('IS_ORG_MEMBER', 'false').lower() == 'true'
            is_pr_author = os.environ.get('IS_PR_AUTHOR', 'false').lower() == 'true'

            if not is_org_member and not is_pr_author:
                raise ValueError("Only the PR author or organization members can modify labels")

            # Get allowed labels dynamically
            try:
                allowed_labels = get_allowed_labels()
            except RuntimeError as e:
                raise ValueError(str(e))

            # Validate and auto-correct labels
            final_labels, correction_messages = validate_and_auto_correct_labels(args.labels, allowed_labels)

            # Show auto-correction messages
            for message in correction_messages:
                print(message)

            # Output labels as JSON for GitHub Action
            import json
            labels_output = {"labels": final_labels}
            print(f"LABELS_JSON: {json.dumps(labels_output)}")
        except ValueError as e:
            print_and_log(f'❌ {e}')

            # Output error as JSON for GitHub Action
            import json
            error_output = {
                "error": "validation_failed",
                "message": "Invalid labels found. Please check the suggestions below and try again.",
                "details": str(e)
            }
            print(f"ERROR_JSON: {json.dumps(error_output)}")
            sys.exit(1)

    print('🚀 Done')


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,773 @@
|
||||
import unittest
|
||||
from unittest.mock import patch, mock_open, MagicMock, call
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
import argparse
|
||||
|
||||
# Mock data for runtimes-matrix.json
# Mirrors the schema consumed by cmd.py: one entry per runtime with the
# package to build, weight output path, license header, weight template,
# cargo features, and extra bencher flags.
mock_runtimes_matrix = [
    {
        "name": "dev",
        "package": "kitchensink-runtime",
        "path": "substrate/frame",
        "header": "substrate/HEADER-APACHE2",
        "template": "substrate/.maintain/frame-weight-template.hbs",
        "bench_features": "runtime-benchmarks",
        "bench_flags": "--flag1 --flag2"
    },
    {
        "name": "zagros",
        "package": "zagros-runtime",
        "path": "pezkuwi/runtime/zagros",
        "header": "pezkuwi/file_header.txt",
        "template": "pezkuwi/xcm/pallet-xcm-benchmarks/template.hbs",
        "bench_features": "runtime-benchmarks",
        "bench_flags": "--flag3 --flag4"
    },
    {
        "name": "pezkuwichain",
        "package": "pezkuwichain-runtime",
        "path": "pezkuwi/runtime/pezkuwichain",
        "header": "pezkuwi/file_header.txt",
        "template": "pezkuwi/xcm/pallet-xcm-benchmarks/template.hbs",
        "bench_features": "runtime-benchmarks",
        # Deliberately empty to exercise the no-extra-flags case.
        "bench_flags": ""
    },
    {
        "name": "asset-hub-zagros",
        "package": "asset-hub-zagros-runtime",
        "path": "cumulus/teyrchains/runtimes/assets/asset-hub-zagros",
        "header": "cumulus/file_header.txt",
        "template": "cumulus/templates/xcm-bench-template.hbs",
        "bench_features": "runtime-benchmarks",
        "bench_flags": "--flag7 --flag8"
    }
]
|
||||
|
||||
def get_mock_bench_output(runtime, pallets, output_path, header, bench_flags, template = None):
    """Build the exact frame-omni-bencher command line cmd.py is expected to run
    for *runtime*/*pallets*, for use in os.system call assertions."""
    wasm = f"target/production/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm"
    tokens = [
        "frame-omni-bencher v1 benchmark pallet",
        "--extrinsic=*",
        f"--runtime={wasm}",
        f"--pallet={pallets}",
        f"--header={header}",
        f"--output={output_path}",
        "--wasm-execution=compiled",
        "--steps=50",
        "--repeat=20",
        "--heap-pages=4096",
    ]
    # The template flag is only present when a template was supplied.
    if template:
        tokens.append(f"--template={template}")
    tokens.append("--no-storage-info --no-min-squares --no-median-slopes")
    # bench_flags may be empty, leaving a trailing space like the original.
    tokens.append(bench_flags)
    return " ".join(tokens)
|
||||
|
||||
class TestCmd(unittest.TestCase):
|
||||
|
||||
    def setUp(self):
        """Patch out every external effect of cmd.py (filesystem, shell,
        argparse, dynamic prdoc import) before each test."""
        # File access: make cmd.py read the mock matrix instead of
        # .github/workflows/runtimes-matrix.json.
        self.patcher1 = patch('builtins.open', new_callable=mock_open, read_data=json.dumps(mock_runtimes_matrix))
        self.patcher2 = patch('json.load', return_value=mock_runtimes_matrix)
        # CLI arguments are injected per-test via mock_parse_args.
        self.patcher3 = patch('argparse.ArgumentParser.parse_known_args')
        # Shell: os.system reports success; os.popen output is set per-test.
        self.patcher4 = patch('os.system', return_value=0)
        self.patcher5 = patch('os.popen')
        # Stub the dynamic import of generate-prdoc.py.
        self.patcher6 = patch('importlib.util.spec_from_file_location', return_value=MagicMock())
        self.patcher7 = patch('importlib.util.module_from_spec', return_value=MagicMock())
        self.patcher8 = patch('cmd.generate_prdoc.main', return_value=0)

        self.mock_open = self.patcher1.start()
        self.mock_json_load = self.patcher2.start()
        self.mock_parse_args = self.patcher3.start()
        self.mock_system = self.patcher4.start()
        self.mock_popen = self.patcher5.start()
        self.mock_spec_from_file_location = self.patcher6.start()
        self.mock_module_from_spec = self.patcher7.start()
        self.mock_generate_prdoc_main = self.patcher8.start()

        # Ensure that cmd.py uses the mock_runtimes_matrix
        # NOTE(review): the local cmd.py shadows the stdlib `cmd` module here —
        # confirm test discovery runs from the directory containing cmd.py.
        import cmd
        cmd.runtimesMatrix = mock_runtimes_matrix
|
||||
|
||||
    def tearDown(self):
        """Stop every patch started in setUp so mocks don't leak across tests."""
        self.patcher1.stop()
        self.patcher2.stop()
        self.patcher3.stop()
        self.patcher4.stop()
        self.patcher5.stop()
        self.patcher6.stop()
        self.patcher7.stop()
        self.patcher8.stop()
|
||||
|
||||
    def test_bench_command_normal_execution_all_runtimes(self):
        """bench over all runtimes: builds each runtime, then benchmarks
        pallet_balances only in runtimes that expose it."""
        self.mock_parse_args.return_value = (argparse.Namespace(
            command='bench-omni',
            runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)),
            pallet=['pallet_balances'],
            fail_fast=True,
            quiet=False,
            clean=False,
            image=None
        ), [])

        # os.popen().read() side effects, consumed in call order by cmd.main():
        # one pallet listing per runtime, then the dev manifest lookup.
        self.mock_popen.return_value.read.side_effect = [
            "pallet_balances\npallet_staking\npallet_something\n",  # Output for dev runtime
            "pallet_balances\npallet_staking\npallet_something\n",  # Output for zagros runtime
            "pallet_staking\npallet_something\n",  # Output for pezkuwichain runtime - no pallet here
            "pallet_balances\npallet_staking\npallet_something\n",  # Output for asset-hub-zagros runtime
            "./substrate/frame/balances/Cargo.toml\n",  # Mock manifest path for dev -> pallet_balances
        ]

        with patch('sys.exit') as mock_exit:
            import cmd
            cmd.main()
            mock_exit.assert_not_called()

        expected_calls = [
            # Build calls
            call("forklift cargo build -q -p kitchensink-runtime --profile production --features=runtime-benchmarks"),
            call("forklift cargo build -q -p zagros-runtime --profile production --features=runtime-benchmarks"),
            call("forklift cargo build -q -p pezkuwichain-runtime --profile production --features=runtime-benchmarks"),
            call("forklift cargo build -q -p asset-hub-zagros-runtime --profile production --features=runtime-benchmarks"),

            call(get_mock_bench_output(
                runtime='kitchensink',
                pallets='pallet_balances',
                output_path='./substrate/frame/balances/src/weights.rs',
                header=os.path.abspath('substrate/HEADER-APACHE2'),
                bench_flags='--flag1 --flag2',
                template="substrate/.maintain/frame-weight-template.hbs"
            )),
            call(get_mock_bench_output(
                runtime='zagros',
                pallets='pallet_balances',
                output_path='./pezkuwi/runtime/zagros/src/weights',
                header=os.path.abspath('pezkuwi/file_header.txt'),
                bench_flags='--flag3 --flag4'
            )),
            # skips pezkuwichain benchmark
            call(get_mock_bench_output(
                runtime='asset-hub-zagros',
                pallets='pallet_balances',
                output_path='./cumulus/teyrchains/runtimes/assets/asset-hub-zagros/src/weights',
                header=os.path.abspath('cumulus/file_header.txt'),
                bench_flags='--flag7 --flag8'
            )),
        ]
        self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
def test_bench_command_normal_execution(self):
    # Benchmark two pallets on a single runtime: expect one build call and
    # one benchmark invocation per pallet.
    self.mock_parse_args.return_value = (argparse.Namespace(
        command='bench-omni',
        runtime=['zagros'],
        pallet=['pallet_balances', 'pallet_staking'],
        fail_fast=True,
        quiet=False,
        clean=False,
        image=None
    ), [])
    header_path = os.path.abspath('pezkuwi/file_header.txt')
    self.mock_popen.return_value.read.side_effect = [
        "pallet_balances\npallet_staking\npallet_something\n",  # Output for zagros runtime
    ]

    with patch('sys.exit') as mock_exit:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

    expected_calls = [
        # Build calls
        call("forklift cargo build -q -p zagros-runtime --profile production --features=runtime-benchmarks"),

        # Zagros runtime calls
        call(get_mock_bench_output(
            runtime='zagros',
            pallets='pallet_balances',
            output_path='./pezkuwi/runtime/zagros/src/weights',
            header=header_path,
            bench_flags='--flag3 --flag4'
        )),
        call(get_mock_bench_output(
            runtime='zagros',
            pallets='pallet_staking',
            output_path='./pezkuwi/runtime/zagros/src/weights',
            header=header_path,
            bench_flags='--flag3 --flag4'
        )),
    ]
    self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
|
||||
def test_bench_command_normal_execution_xcm(self):
    # An XCM benchmark pallet is routed to the runtime's weights/xcm output
    # directory and uses the pallet-xcm-benchmarks template.
    self.mock_parse_args.return_value = (argparse.Namespace(
        command='bench-omni',
        runtime=['zagros'],
        pallet=['pallet_xcm_benchmarks::generic'],
        fail_fast=True,
        quiet=False,
        clean=False,
        image=None
    ), [])
    header_path = os.path.abspath('pezkuwi/file_header.txt')
    self.mock_popen.return_value.read.side_effect = [
        "pallet_balances\npallet_staking\npallet_something\npallet_xcm_benchmarks::generic\n",  # Output for zagros runtime
    ]

    with patch('sys.exit') as mock_exit:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

    expected_calls = [
        # Build calls
        call("forklift cargo build -q -p zagros-runtime --profile production --features=runtime-benchmarks"),

        # Zagros runtime calls
        call(get_mock_bench_output(
            runtime='zagros',
            pallets='pallet_xcm_benchmarks::generic',
            output_path='./pezkuwi/runtime/zagros/src/weights/xcm',
            header=header_path,
            bench_flags='--flag3 --flag4',
            template="pezkuwi/xcm/pallet-xcm-benchmarks/template.hbs"
        )),
    ]
    self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
def test_bench_command_two_runtimes_two_pallets(self):
    # Benchmark two pallets across two runtimes: expect a build call per
    # runtime and a benchmark call per (runtime, pallet) pair.
    self.mock_parse_args.return_value = (argparse.Namespace(
        command='bench-omni',
        runtime=['zagros', 'pezkuwichain'],
        pallet=['pallet_balances', 'pallet_staking'],
        fail_fast=True,
        quiet=False,
        clean=False,
        image=None
    ), [])
    self.mock_popen.return_value.read.side_effect = [
        "pallet_staking\npallet_balances\n",  # Output for zagros runtime
        "pallet_staking\npallet_balances\n",  # Output for pezkuwichain runtime
    ]

    with patch('sys.exit') as mock_exit:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()
    header_path = os.path.abspath('pezkuwi/file_header.txt')

    expected_calls = [
        # Build calls
        call("forklift cargo build -q -p zagros-runtime --profile production --features=runtime-benchmarks"),
        call("forklift cargo build -q -p pezkuwichain-runtime --profile production --features=runtime-benchmarks"),
        # Zagros runtime calls
        call(get_mock_bench_output(
            runtime='zagros',
            pallets='pallet_staking',
            output_path='./pezkuwi/runtime/zagros/src/weights',
            header=header_path,
            bench_flags='--flag3 --flag4'
        )),
        call(get_mock_bench_output(
            runtime='zagros',
            pallets='pallet_balances',
            output_path='./pezkuwi/runtime/zagros/src/weights',
            header=header_path,
            bench_flags='--flag3 --flag4'
        )),
        # Pezkuwichain runtime calls (note: no extra bench flags configured)
        call(get_mock_bench_output(
            runtime='pezkuwichain',
            pallets='pallet_staking',
            output_path='./pezkuwi/runtime/pezkuwichain/src/weights',
            header=header_path,
            bench_flags=''
        )),
        call(get_mock_bench_output(
            runtime='pezkuwichain',
            pallets='pallet_balances',
            output_path='./pezkuwi/runtime/pezkuwichain/src/weights',
            header=header_path,
            bench_flags=''
        )),
    ]
    self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
def test_bench_command_one_dev_runtime(self):
    # Benchmark a pallet on the `dev` (kitchensink) runtime. The second popen
    # entry simulates looking up the pallet's Cargo.toml manifest, from which
    # the weights output path is derived.
    self.mock_parse_args.return_value = (argparse.Namespace(
        command='bench-omni',
        runtime=['dev'],
        pallet=['pallet_balances'],
        fail_fast=True,
        quiet=False,
        clean=False,
        image=None
    ), [])
    manifest_dir = "substrate/frame/kitchensink"
    self.mock_popen.return_value.read.side_effect = [
        "pallet_balances\npallet_something",  # Output for dev runtime
        manifest_dir + "/Cargo.toml"  # Output for manifest path in dev runtime
    ]
    header_path = os.path.abspath('substrate/HEADER-APACHE2')

    with patch('sys.exit') as mock_exit:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

    expected_calls = [
        # Build calls
        call("forklift cargo build -q -p kitchensink-runtime --profile production --features=runtime-benchmarks"),
        # Kitchensink (dev) runtime calls
        call(get_mock_bench_output(
            runtime='kitchensink',
            pallets='pallet_balances',
            output_path=manifest_dir + "/src/weights.rs",
            header=header_path,
            bench_flags='--flag1 --flag2',
            template="substrate/.maintain/frame-weight-template.hbs"
        )),
    ]
    self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
def test_bench_command_one_cumulus_runtime(self):
    # Benchmark a single pallet on one cumulus (teyrchain) runtime.
    self.mock_parse_args.return_value = (argparse.Namespace(
        command='bench-omni',
        runtime=['asset-hub-zagros'],
        pallet=['pallet_assets'],
        fail_fast=True,
        quiet=False,
        clean=False,
        image=None
    ), [])
    self.mock_popen.return_value.read.side_effect = [
        "pallet_assets\n",  # Output for asset-hub-zagros runtime
    ]
    header_path = os.path.abspath('cumulus/file_header.txt')

    with patch('sys.exit') as mock_exit:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

    expected_calls = [
        # Build calls
        call("forklift cargo build -q -p asset-hub-zagros-runtime --profile production --features=runtime-benchmarks"),
        # Asset-hub-zagros runtime calls
        call(get_mock_bench_output(
            runtime='asset-hub-zagros',
            pallets='pallet_assets',
            output_path='./cumulus/teyrchains/runtimes/assets/asset-hub-zagros/src/weights',
            header=header_path,
            bench_flags='--flag7 --flag8'
        )),
    ]

    self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
def test_bench_command_one_cumulus_runtime_xcm(self):
    # Mix an XCM benchmark pallet with a regular pallet on a cumulus runtime:
    # the XCM pallet gets the weights/xcm output path and the xcm template,
    # the regular pallet gets the plain weights path.
    self.mock_parse_args.return_value = (argparse.Namespace(
        command='bench-omni',
        runtime=['asset-hub-zagros'],
        pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'],
        fail_fast=True,
        quiet=False,
        clean=False,
        image=None
    ), [])
    self.mock_popen.return_value.read.side_effect = [
        "pallet_assets\npallet_xcm_benchmarks::generic\n",  # Output for asset-hub-zagros runtime
    ]
    header_path = os.path.abspath('cumulus/file_header.txt')

    with patch('sys.exit') as mock_exit:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

    expected_calls = [
        # Build calls
        call("forklift cargo build -q -p asset-hub-zagros-runtime --profile production --features=runtime-benchmarks"),
        # Asset-hub-zagros runtime calls
        call(get_mock_bench_output(
            runtime='asset-hub-zagros',
            pallets='pallet_xcm_benchmarks::generic',
            output_path='./cumulus/teyrchains/runtimes/assets/asset-hub-zagros/src/weights/xcm',
            header=header_path,
            bench_flags='--flag7 --flag8',
            template="cumulus/templates/xcm-bench-template.hbs"
        )),
        call(get_mock_bench_output(
            runtime='asset-hub-zagros',
            pallets='pallet_assets',
            output_path='./cumulus/teyrchains/runtimes/assets/asset-hub-zagros/src/weights',
            header=header_path,
            bench_flags='--flag7 --flag8'
        )),
    ]

    self.mock_system.assert_has_calls(expected_calls, any_order=True)
|
||||
|
||||
@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt'), []))
@patch('os.system', return_value=0)
def test_fmt_command(self, mock_system, mock_parse_args):
    """`/cmd fmt` must run both formatters and never exit with an error."""
    with patch('sys.exit') as mock_exit:
        import cmd
        cmd.main()

    mock_exit.assert_not_called()
    # Both formatting tools must have been invoked via os.system.
    for expected in ('cargo +nightly fmt', 'taplo format --config .config/taplo.toml'):
        mock_system.assert_any_call(expected)
|
||||
|
||||
@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui'), []))
@patch('os.system', return_value=0)
def test_update_ui_command(self, mock_system, mock_parse_args):
    """`/cmd update-ui` must delegate to the UI-test update shell script."""
    with patch('sys.exit') as mock_exit:
        import cmd
        cmd.main()

    mock_exit.assert_not_called()
    # The most recent os.system invocation must be the update script.
    mock_system.assert_called_with('sh ./scripts/update-ui-tests.sh')
|
||||
|
||||
@patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='prdoc'), []))
@patch('os.system', return_value=0)
def test_prdoc_command(self, mock_system, mock_parse_args):
    """`/cmd prdoc` must forward the parsed arguments to generate_prdoc.main."""
    with patch('sys.exit') as mock_exit:
        import cmd
        cmd.main()

    mock_exit.assert_not_called()
    # main() hands the parsed namespace straight to the prdoc generator.
    parsed_namespace = mock_parse_args.return_value[0]
    self.mock_generate_prdoc_main.assert_called_with(parsed_namespace)
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_valid_labels(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command with valid labels: both labels pass validation
    and are emitted on the machine-readable LABELS_JSON line."""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required', 'D2-substantial']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME', 'R0-no-crate-publish-required']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

    # Find the LABELS_JSON line. The scan variable is named `entry` (not
    # `call`) so it does not shadow unittest.mock.call used in this module.
    json_call = next(
        (entry for entry in mock_print.call_args_list if 'LABELS_JSON:' in str(entry)),
        None,
    )

    self.assertIsNotNone(json_call)
    self.assertIn('T1-FRAME', str(json_call))
    self.assertIn('R0-no-crate-publish-required', str(json_call))
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_auto_correction(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command with auto-correctable typos: an "Auto-corrected"
    notice is printed and LABELS_JSON carries the corrected names."""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required', 'D2-substantial']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAM', 'R0-no-crate-publish']  # Typos that should be auto-corrected
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

    # Scan variables are named `entry` (not `call`) so they do not shadow
    # unittest.mock.call used in this module.
    correction_messages = [str(entry) for entry in mock_print.call_args_list if 'Auto-corrected' in str(entry)]
    self.assertTrue(correction_messages)

    # The machine-readable output must contain the corrected labels.
    json_call = next(
        (entry for entry in mock_print.call_args_list if 'LABELS_JSON:' in str(entry)),
        None,
    )

    self.assertIsNotNone(json_call)
    self.assertIn('T1-FRAME', str(json_call))
    self.assertIn('R0-no-crate-publish-required', str(json_call))
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_prefix_correction(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command with prefix matching: `T1-something` resolves to
    the only T1- label in the allowed set."""
    mock_get_labels.return_value = ['T1-FRAME', 'T2-pallets', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-something']  # Should match T1-FRAME as the only T1- label
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

    # Find the LABELS_JSON line. The scan variable is named `entry` (not
    # `call`) so it does not shadow unittest.mock.call used in this module.
    json_call = next(
        (entry for entry in mock_print.call_args_list if 'LABELS_JSON:' in str(entry)),
        None,
    )

    self.assertIsNotNone(json_call)
    self.assertIn('T1-FRAME', str(json_call))
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_invalid_labels(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command with invalid labels that cannot be corrected:
    the command exits 1 and prints a validation-failure ERROR_JSON line."""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required', 'D2-substantial']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['INVALID-LABEL', 'ANOTHER-BAD-LABEL']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_called_with(1)  # Should exit with error code

    # Find the ERROR_JSON line. The scan variable is named `entry` (not
    # `call`) so it does not shadow unittest.mock.call used in this module.
    error_json_call = next(
        (entry for entry in mock_print.call_args_list if 'ERROR_JSON:' in str(entry)),
        None,
    )

    self.assertIsNotNone(error_json_call)
    self.assertIn('validation_failed', str(error_json_call))
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_mixed_valid_invalid(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command with mix of valid and invalid labels: a single
    uncorrectable label fails the whole command with exit code 1."""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required', 'D2-substantial']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME', 'INVALID-LABEL', 'D2-substantial']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_called_with(1)  # Should exit with error code due to invalid label

    # Find the ERROR_JSON line. The scan variable is named `entry` (not
    # `call`) so it does not shadow unittest.mock.call used in this module.
    error_json_call = next(
        (entry for entry in mock_print.call_args_list if 'ERROR_JSON:' in str(entry)),
        None,
    )

    self.assertIsNotNone(error_json_call)
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_fetch_failure(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command when label fetching fails: the RuntimeError is
    surfaced as an ERROR_JSON line and the command exits 1."""
    mock_get_labels.side_effect = RuntimeError("Failed to fetch labels from repository. Please check your connection and try again.")
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_called_with(1)  # Should exit with error code

    # Find the ERROR_JSON line. The scan variable is named `entry` (not
    # `call`) so it does not shadow unittest.mock.call used in this module.
    error_json_call = next(
        (entry for entry in mock_print.call_args_list if 'ERROR_JSON:' in str(entry)),
        None,
    )

    self.assertIsNotNone(error_json_call)
    self.assertIn('Failed to fetch labels from repository', str(error_json_call))
|
||||
|
||||
def test_auto_correct_labels_function(self):
    """Exercise cmd.auto_correct_labels directly, without the CLI layer."""
    import cmd

    known = ['T1-FRAME', 'R0-no-crate-publish-required', 'D2-substantial', 'I2-bug']

    # A near-miss should be fixed automatically (high-similarity path).
    corrections, suggestions = cmd.auto_correct_labels(['T1-FRAM'], known)
    self.assertEqual(len(corrections), 1)
    original, corrected = corrections[0][0], corrections[0][1]
    self.assertEqual(original, 'T1-FRAM')
    self.assertEqual(corrected, 'T1-FRAME')

    # A far-off input should not be corrected, only suggested.
    corrections, suggestions = cmd.auto_correct_labels(['TOTALLY-WRONG'], known)
    self.assertEqual(len(corrections), 0)
    self.assertEqual(len(suggestions), 1)
|
||||
|
||||
def test_find_closest_labels_function(self):
    """Exercise cmd.find_closest_labels directly, without the CLI layer."""
    import cmd

    candidates = ['T1-FRAME', 'T2-pallets', 'R0-no-crate-publish-required']

    # A close misspelling should surface the intended label.
    self.assertIn('T1-FRAME', cmd.find_closest_labels('T1-FRAM', candidates))

    # With a strict cutoff, an unrelated string must yield no matches.
    self.assertEqual(len(cmd.find_closest_labels('COMPLETELY-DIFFERENT', candidates, cutoff=0.8)), 0)
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_merged_pr(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command on merged PR should fail with exit code 1 and a
    'Cannot modify labels on merged PRs' ERROR_JSON line."""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = False  # PR is merged/closed
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_called_with(1)

    # Find the ERROR_JSON line. The scan variable is named `entry` (not
    # `call`) so it does not shadow unittest.mock.call used in this module.
    error_json_call = next(
        (entry for entry in mock_print.call_args_list if 'ERROR_JSON:' in str(entry)),
        None,
    )

    self.assertIsNotNone(error_json_call)
    self.assertIn('Cannot modify labels on merged PRs', str(error_json_call))
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_open_pr(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command on open PR should succeed and print LABELS_JSON."""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

    # Find the LABELS_JSON line. The scan variable is named `entry` (not
    # `call`) so it does not shadow unittest.mock.call used in this module.
    json_call = next(
        (entry for entry in mock_print.call_args_list if 'LABELS_JSON:' in str(entry)),
        None,
    )

    self.assertIsNotNone(json_call)
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'false', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_unauthorized_user(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command by unauthorized user (neither PR author nor org
    member) should fail with exit code 1 and a permission ERROR_JSON line."""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_called_with(1)

    # Find the ERROR_JSON line. The scan variable is named `entry` (not
    # `call`) so it does not shadow unittest.mock.call used in this module.
    error_json_call = next(
        (entry for entry in mock_print.call_args_list if 'ERROR_JSON:' in str(entry)),
        None,
    )

    self.assertIsNotNone(error_json_call)
    self.assertIn('Only the PR author or organization members can modify labels', str(error_json_call))
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'false', 'IS_PR_AUTHOR': 'true', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_pr_author(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command by PR author should succeed even when the author
    is not an org member."""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

    # Find the LABELS_JSON line. The scan variable is named `entry` (not
    # `call`) so it does not shadow unittest.mock.call used in this module.
    json_call = next(
        (entry for entry in mock_print.call_args_list if 'LABELS_JSON:' in str(entry)),
        None,
    )

    self.assertIsNotNone(json_call)
|
||||
|
||||
@patch.dict('os.environ', {'PR_NUM': '123', 'IS_ORG_MEMBER': 'true', 'IS_PR_AUTHOR': 'false', 'GITHUB_TOKEN': 'fake_token'})
@patch('cmd.get_allowed_labels')
@patch('cmd.check_pr_status')
@patch('argparse.ArgumentParser.parse_known_args')
def test_label_command_org_member(self, mock_parse_args, mock_check_pr_status, mock_get_labels):
    """Test label command by org member should succeed even when the member
    is not the PR author."""
    mock_get_labels.return_value = ['T1-FRAME', 'R0-no-crate-publish-required']
    mock_check_pr_status.return_value = True  # PR is open
    mock_parse_args.return_value = (argparse.Namespace(
        command='label',
        labels=['T1-FRAME']
    ), [])

    with patch('sys.exit') as mock_exit, patch('builtins.print') as mock_print:
        import cmd
        cmd.main()
        mock_exit.assert_not_called()

    # Find the LABELS_JSON line. The scan variable is named `entry` (not
    # `call`) so it does not shadow unittest.mock.call used in this module.
    json_call = next(
        (entry for entry in mock_print.call_args_list if 'LABELS_JSON:' in str(entry)),
        None,
    )

    self.assertIsNotNone(json_call)
|
||||
|
||||
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
|
||||