/cmd followups (#5533)
Closes: #5545

- add missing templates for the frame & xcm benchmarks
- fix `git pull` (broken run: https://github.com/paritytech/polkadot-sdk/actions/runs/10644887539/job/29510118915)
- respect runtime headers: use the GNU header instead of the Apache one for runtimes (see the sketch after this list)
- add tests for cmd.py
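
For context on the header and template bullets: cmd.py drives everything off `.github/workflows/runtimes-matrix.json`, and the diff below reads the `name`, `package`, `path`, `header`, and `template` fields of each entry. A minimal sketch of that shape, with illustrative values only (the concrete paths and package names are assumptions, not copied from the repository):

```python
# Illustrative entries only; the real file is
# .github/workflows/runtimes-matrix.json and its values may differ.
runtimes_matrix = [
    {
        "name": "dev",                          # substrate dev runtime
        "package": "kitchensink-runtime",       # hypothetical package name
        "path": "substrate/frame",
        "header": "substrate/HEADER-APACHE2",   # Apache header stays for substrate
        "template": "substrate/.maintain/frame-weight-template.hbs",
    },
    {
        "name": "westend",
        "package": "westend-runtime",
        "path": "polkadot/runtime/westend",
        "header": "polkadot/file_header.txt",   # GNU header for runtimes
        "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs",
    },
]

# cmd.py derives its list of runtime names the same way the script below does.
runtime_names = [entry["name"] for entry in runtimes_matrix]
assert runtime_names == ["dev", "westend"]
```

Before this change the script hardcoded `./substrate/HEADER-APACHE2` for every runtime; afterwards each benchmark run picks up `config['header']` (and, where set, `config['template']`) from its matrix entry.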

Tip: review this one with Whitespace hidden

![image](https://github.com/user-attachments/assets/3bcdc6c2-7371-428f-9962-556ca81c1467)

---------

Co-authored-by: GitHub Action <[email protected]>
mordamax and actions-user authored Sep 5, 2024
1 parent 702a15c commit 8d81f1e
Showing 6 changed files with 584 additions and 195 deletions.
275 changes: 150 additions & 125 deletions .github/scripts/cmd/cmd.py
@@ -11,6 +11,8 @@
f = open('.github/workflows/runtimes-matrix.json', 'r')
runtimesMatrix = json.load(f)

print(f'runtimesMatrix: {runtimesMatrix}\n')

runtimeNames = list(map(lambda x: x['name'], runtimesMatrix))

common_args = {
@@ -67,130 +69,153 @@
for arg, config in common_args.items():
parser_ui.add_argument(arg, **config)

def main():
global args, unknown, runtimesMatrix
args, unknown = parser.parse_known_args()

print(f'args: {args}')

if args.command == 'bench':
runtime_pallets_map = {}
failed_benchmarks = {}
successful_benchmarks = {}

profile = "release"

print(f'Provided runtimes: {args.runtime}')
# convert to mapped dict
runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix))
runtimesMatrix = {x['name']: x for x in runtimesMatrix}
print(f'Filtered out runtimes: {runtimesMatrix}')

# loop over remaining runtimes to collect available pallets
for runtime in runtimesMatrix.values():
os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features runtime-benchmarks")
print(f'-- listing pallets for benchmark for {runtime["name"]}')
wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
output = os.popen(
f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file}").read()
raw_pallets = output.strip().split('\n')

all_pallets = set()
for pallet in raw_pallets:
if pallet:
all_pallets.add(pallet.split(',')[0].strip())

pallets = list(all_pallets)
print(f'Pallets in {runtime["name"]}: {pallets}')
runtime_pallets_map[runtime['name']] = pallets

print(f'\n')

# filter out only the specified pallets from collected runtimes/pallets
if args.pallet:
print(f'Pallets: {args.pallet}')
new_pallets_map = {}
# keep only specified pallets if they exist in the runtime
for runtime in runtime_pallets_map:
if set(args.pallet).issubset(set(runtime_pallets_map[runtime])):
new_pallets_map[runtime] = args.pallet

runtime_pallets_map = new_pallets_map

print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n')

if not runtime_pallets_map:
if args.pallet and not args.runtime:
print(f"No pallets {args.pallet} found in any runtime")
elif args.runtime and not args.pallet:
print(f"{args.runtime} runtime does not have any pallets")
elif args.runtime and args.pallet:
print(f"No pallets {args.pallet} found in {args.runtime}")
else:
print('No runtimes found')
sys.exit(1)

args, unknown = parser.parse_known_args()

print(f'args: {args}')

if args.command == 'bench':
runtime_pallets_map = {}
failed_benchmarks = {}
successful_benchmarks = {}

profile = "release"

print(f'Provided runtimes: {args.runtime}')
# convert to mapped dict
runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix))
runtimesMatrix = {x['name']: x for x in runtimesMatrix}
print(f'Filtered out runtimes: {runtimesMatrix}')

# loop over remaining runtimes to collect available pallets
for runtime in runtimesMatrix.values():
os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features runtime-benchmarks")
print(f'-- listing pallets for benchmark for {runtime["name"]}')
wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
output = os.popen(
f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file}").read()
raw_pallets = output.strip().split('\n')

all_pallets = set()
for pallet in raw_pallets:
if pallet:
all_pallets.add(pallet.split(',')[0].strip())

pallets = list(all_pallets)
print(f'Pallets in {runtime}: {pallets}')
runtime_pallets_map[runtime['name']] = pallets

# filter out only the specified pallets from collected runtimes/pallets
if args.pallet:
print(f'Pallet: {args.pallet}')
new_pallets_map = {}
# keep only specified pallets if they exist in the runtime
for runtime in runtime_pallets_map:
if set(args.pallet).issubset(set(runtime_pallets_map[runtime])):
new_pallets_map[runtime] = args.pallet

runtime_pallets_map = new_pallets_map

print(f'Filtered out runtimes & pallets: {runtime_pallets_map}')

if not runtime_pallets_map:
if args.pallet and not args.runtime:
print(f"No pallets {args.pallet} found in any runtime")
elif args.runtime and not args.pallet:
print(f"{args.runtime} runtime does not have any pallets")
elif args.runtime and args.pallet:
print(f"No pallets {args.pallet} found in {args.runtime}")
else:
print('No runtimes found')
sys.exit(1)

header_path = os.path.abspath('./substrate/HEADER-APACHE2')

for runtime in runtime_pallets_map:
for pallet in runtime_pallets_map[runtime]:
config = runtimesMatrix[runtime]
print(f'-- config: {config}')
if runtime == 'dev':
# to support sub-modules (https://github.com/paritytech/command-bot/issues/275)
search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
print(f'-- running: {search_manifest_path}')
manifest_path = os.popen(search_manifest_path).read()
if not manifest_path:
print(f'-- pallet {pallet} not found in dev runtime')
exit(1)
package_dir = os.path.dirname(manifest_path)
print(f'-- package_dir: {package_dir}')
print(f'-- manifest_path: {manifest_path}')
output_path = os.path.join(package_dir, "src", "weights.rs")
else:
default_path = f"./{config['path']}/src/weights"
xcm_path = f"./{config['path']}/src/weights/xcm"
output_path = default_path if not pallet.startswith("pallet_xcm_benchmarks") else xcm_path
print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
cmd = f"frame-omni-bencher v1 benchmark pallet --extrinsic=* --runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm --pallet={pallet} --header={header_path} --output={output_path} --wasm-execution=compiled --steps=50 --repeat=20 --heap-pages=4096 --no-storage-info --no-min-squares --no-median-slopes"
print(f'-- Running: {cmd}')
status = os.system(cmd)
if status != 0 and not args.continue_on_fail:
print(f'Failed to benchmark {pallet} in {runtime}')
sys.exit(1)

# Otherwise collect failed benchmarks and print them at the end
# push failed pallets to failed_benchmarks
if status != 0:
failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
else:
successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]

if failed_benchmarks:
print('❌ Failed benchmarks of runtimes/pallets:')
for runtime, pallets in failed_benchmarks.items():
print(f'-- {runtime}: {pallets}')

if successful_benchmarks:
print('✅ Successful benchmarks of runtimes/pallets:')
for runtime, pallets in successful_benchmarks.items():
print(f'-- {runtime}: {pallets}')

elif args.command == 'fmt':
command = f"cargo +nightly fmt"
print(f'Formatting with `{command}`')
nightly_status = os.system(f'{command}')
taplo_status = os.system('taplo format --config .config/taplo.toml')

if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail:
print('❌ Failed to format code')
sys.exit(1)

elif args.command == 'update-ui':
command = 'sh ./scripts/update-ui-tests.sh'
print(f'Updating ui with `{command}`')
status = os.system(f'{command}')

if status != 0 and not args.continue_on_fail:
print('❌ Failed to format code')
sys.exit(1)

print('🚀 Done')
for pallet in runtime_pallets_map[runtime]:
config = runtimesMatrix[runtime]
header_path = os.path.abspath(config['header'])
template = None

print(f'-- config: {config}')
if runtime == 'dev':
# to support sub-modules (https://github.com/paritytech/command-bot/issues/275)
search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
print(f'-- running: {search_manifest_path}')
manifest_path = os.popen(search_manifest_path).read()
if not manifest_path:
print(f'-- pallet {pallet} not found in dev runtime')
exit(1)
package_dir = os.path.dirname(manifest_path)
print(f'-- package_dir: {package_dir}')
print(f'-- manifest_path: {manifest_path}')
output_path = os.path.join(package_dir, "src", "weights.rs")
template = config['template']
else:
default_path = f"./{config['path']}/src/weights"
xcm_path = f"./{config['path']}/src/weights/xcm"
output_path = default_path
if pallet.startswith("pallet_xcm_benchmarks"):
template = config['template']
output_path = xcm_path

print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
cmd = f"frame-omni-bencher v1 benchmark pallet " \
f"--extrinsic=* " \
f"--runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm " \
f"--pallet={pallet} " \
f"--header={header_path} " \
f"--output={output_path} " \
f"--wasm-execution=compiled " \
f"--steps=50 " \
f"--repeat=20 " \
f"--heap-pages=4096 " \
f"{f'--template={template} ' if template else ''}" \
f"--no-storage-info --no-min-squares --no-median-slopes"
print(f'-- Running: {cmd} \n')
status = os.system(cmd)
if status != 0 and not args.continue_on_fail:
print(f'Failed to benchmark {pallet} in {runtime}')
sys.exit(1)

# Otherwise collect failed benchmarks and print them at the end
# push failed pallets to failed_benchmarks
if status != 0:
failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
else:
successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]

if failed_benchmarks:
print('❌ Failed benchmarks of runtimes/pallets:')
for runtime, pallets in failed_benchmarks.items():
print(f'-- {runtime}: {pallets}')

if successful_benchmarks:
print('✅ Successful benchmarks of runtimes/pallets:')
for runtime, pallets in successful_benchmarks.items():
print(f'-- {runtime}: {pallets}')

elif args.command == 'fmt':
command = f"cargo +nightly fmt"
print(f'Formatting with `{command}`')
nightly_status = os.system(f'{command}')
taplo_status = os.system('taplo format --config .config/taplo.toml')

if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail:
print('❌ Failed to format code')
sys.exit(1)

elif args.command == 'update-ui':
command = 'sh ./scripts/update-ui-tests.sh'
print(f'Updating ui with `{command}`')
status = os.system(f'{command}')

if status != 0 and not args.continue_on_fail:
print('❌ Failed to update UI tests')
sys.exit(1)

print('🚀 Done')

if __name__ == '__main__':
main()
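
On the "add tests for cmd.py" bullet: wrapping the script body in `main()` (the other half of this diff) is what makes it importable, and the natural seam for testing it is the shell boundary. A minimal sketch under those assumptions; the file location is real, but the test case and assertions are hypothetical, not the exact tests this commit adds:

```python
import importlib.util
import sys
import unittest
from unittest import mock

# Load cmd.py as a module. Executing it at import time runs the top-level
# json.load of runtimes-matrix.json, so run this from the repository root.
spec = importlib.util.spec_from_file_location("cmd_py", ".github/scripts/cmd/cmd.py")
cmd_py = importlib.util.module_from_spec(spec)
spec.loader.exec_module(cmd_py)

class TestFmtCommand(unittest.TestCase):
    def test_fmt_shells_out_to_both_formatters(self):
        # Pretend the CLI was invoked as `cmd.py fmt` and stub out the shell,
        # so no real cargo/taplo binaries are needed.
        with mock.patch.object(sys, "argv", ["cmd.py", "fmt"]), \
             mock.patch.object(cmd_py.os, "system", return_value=0) as system:
            cmd_py.main()
        calls = [c.args[0] for c in system.call_args_list]
        self.assertIn("cargo +nightly fmt", calls)
        self.assertTrue(any("taplo format" in c for c in calls))

if __name__ == "__main__":
    unittest.main()
```

The same pattern extends to the `bench` path by also patching `cmd_py.os.popen` so the pallet-listing step returns canned output instead of invoking frame-omni-bencher.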