From 7ab3341060d7ece1b9899cbf43d4999deb11d86e Mon Sep 17 00:00:00 2001
From: Venkkatesh Sekar
Date: Mon, 30 Sep 2024 14:32:13 +0000
Subject: [PATCH] fix formatting

---
 .../determine-initial-guest-os-versions.py    |  66 ++++--
 .github/scripts/generate-ci.py                |   2 +-
 .../candid_integration_tests.py               |  23 +-
 .../inspect_stamped_trivial_wasm.py           |   1 +
 ci/src/git_changes/git_changes.py             |   1 +
 ci/src/git_changes/git_changes_test.py        |   2 +
 .../check_file_references.py                  |  41 ++--
 .../monitor-expand-shared-data.py             |   1 +
 .../dev-tools/bare_metal_deployment/deploy.py | 215 +++++++++---------
 .../parse_voltage_readings.py                 |  12 +-
 pre-commit/ruff-format.sh                     |   1 +
 pre-commit/ruff-lint.sh                       |   2 +-
 .../public_suffix_list_shrink.py              |   4 +-
 rs/coverage.py                                |   2 +-
 .../bls12_381/scripts/cost_estimator.py       | 100 ++++----
 .../cketh/minter/scripts/ofaq_blocklist.py    |   4 +-
 .../proptests/proptest-regression-parser.py   |   3 +-
 rs/workload_generator/plot.py                 |   1 -
 testnet/ansible/inventory/inventory.py        |   2 +-
 testnet/host_inventory.py                     |   2 +-
 testnet/tools/icos_collect_debug_info.py      |   1 +
 .../sysimage/build_container_base_image.py    |  20 +-
 .../build_container_filesystem_tar.py         | 123 +++++-----
 toolchains/sysimage/build_ext4_image.py       |  50 +++-
 toolchains/sysimage/build_fat32_image.py      |  12 +-
 toolchains/sysimage/build_lvm_image.py        |   3 +-
 toolchains/sysimage/build_vfat_image.py       |  12 +-
 toolchains/sysimage/container_utils.py        |  15 +-
 28 files changed, 400 insertions(+), 321 deletions(-)

diff --git a/.github/scripts/determine-initial-guest-os-versions.py b/.github/scripts/determine-initial-guest-os-versions.py
index 01c1f64c382..e5a87eb91ad 100644
--- a/.github/scripts/determine-initial-guest-os-versions.py
+++ b/.github/scripts/determine-initial-guest-os-versions.py
@@ -4,14 +4,15 @@
 from typing import Any, Dict, List, Optional, TypedDict, cast
 from urllib.request import urlopen
 
-ROLLOUT_DASHBOARD_ENDPOINT='https://rollout-dashboard.ch1-rel1.dfinity.network/api/v1/rollouts'
-PUBLIC_DASHBOARD_ENDPOINT='https://ic-api.internetcomputer.org/api/v3/subnets?format=json'
+ROLLOUT_DASHBOARD_ENDPOINT = "https://rollout-dashboard.ch1-rel1.dfinity.network/api/v1/rollouts"
+PUBLIC_DASHBOARD_ENDPOINT = "https://ic-api.internetcomputer.org/api/v3/subnets?format=json"
 
 # Key definitions
-EXECUTED_TIMESTAMP_SECONDS = 'executed_timestamp_seconds'
-REPLICA_VERSIONS = 'replica_versions'
-REPLICA_VERSION_ID = 'replica_version_id'
-SUBNETS = 'subnets'
+EXECUTED_TIMESTAMP_SECONDS = "executed_timestamp_seconds"
+REPLICA_VERSIONS = "replica_versions"
+REPLICA_VERSION_ID = "replica_version_id"
+SUBNETS = "subnets"
+
 
 # Minimal subset of API structure needed for rollout dashboard.
 # Always keep me in sync with https://github.com/dfinity/dre-airflow/blob/main/rollout-dashboard/server/src/types.rs
@@ -28,11 +29,13 @@ class SubnetRolloutState(Enum):
     complete = "complete"
     unknown = "unknown"
 
+
 class Subnet(TypedDict):
     subnet_id: str
     git_revision: str
     state: SubnetRolloutState
 
+
 class Batch(TypedDict):
     subnets: List[Subnet]
     # The following three are dates but they are ISO UTF Z,
@@ -41,6 +44,7 @@
     actual_start_time: Optional[str]
     end_time: Optional[str]
 
+
 class RolloutState(Enum):
     complete = "complete"
     failed = "failed"
@@ -50,11 +54,13 @@ class RolloutState(Enum):
     waiting = "waiting"
     problem = "problem"
 
+
 class Rollout(TypedDict):
     name: str
     state: RolloutState
     batches: Dict[str, Batch]
 
+
 # Minimal subset of API structure needed for public dashboard.
 # Swagger for the public dashboard API: https://ic-api.internetcomputer.org/api/v3/swagger .
 class PDReplicaVersion(TypedDict):
@@ -62,20 +68,25 @@ class PDReplicaVersion(TypedDict):
     proposal_id: str  # really an int
     replica_version_id: str
 
+
 class PDSubnet(TypedDict):
     replica_versions: List[PDReplicaVersion]
     subnet_id: str
 
+
 class PDSubnetsResponse(TypedDict):
     subnets: List[PDSubnet]
 
+
 def eprint(*args, **kwargs):
     print(*args, file=sys.stderr, **kwargs)
 
+
 def eprint_fmt(str, *args):
-    return # remove me to get some real action
+    return  # remove me to get some real action
     print((str % args) if args else str, file=sys.stderr)
 
+
 def request_json(url: str) -> Any:
     resp = urlopen(url, timeout=15)
     if resp.status != 200:
@@ -83,10 +94,14 @@ def request_json(url: str) -> Any:
             data = resp.read()
         except Exception:
             data = None
-        raise RuntimeError("Non-200 HTTP response (%s) from %s: %s" % (resp.status, url, data[:160] if data else "(no data in response)"))
+        raise RuntimeError(
+            "Non-200 HTTP response (%s) from %s: %s"
+            % (resp.status, url, data[:160] if data else "(no data in response)")
+        )
     return json.load(resp)
 
-def fetch_versions_from_rollout_dashboard(): # type: () -> list[str] | None
+
+def fetch_versions_from_rollout_dashboard():  # type: () -> list[str] | None
     """
     Fetch data from rollout dashboard
 
@@ -101,7 +116,7 @@ def fetch_versions_from_rollout_dashboard(): # type: () -> list[str] | None
         return []
 
     # The value of the dict entry is datestring, git revision.
-    subnet_to_revision = {} # type: dict[str, list[tuple[str, str]]]
+    subnet_to_revision = {}  # type: dict[str, list[tuple[str, str]]]
 
     for rollout in reversed(rollouts):  # Oldest to newest
         for batch_num_ignored, batch in rollout["batches"].items():
@@ -119,7 +134,7 @@ def fetch_versions_from_rollout_dashboard(): # type: () -> list[str] | None
                         subnet["git_revision"],
                         subnet["subnet_id"],
                         rollout["name"],
-                        subnet["state"]
+                        subnet["state"],
                     )
                     continue
                 else:
@@ -128,7 +143,7 @@ def fetch_versions_from_rollout_dashboard(): # type: () -> list[str] | None
                         subnet["git_revision"],
                         subnet["subnet_id"],
                         rollout["name"],
-                        subnet["state"]
+                        subnet["state"],
                     )
                 t = batch.get("end_time") or batch.get("actual_start_time") or batch["planned_start_time"]
                 if subnet["subnet_id"] not in subnet_to_revision:
@@ -139,12 +154,17 @@ def fetch_versions_from_rollout_dashboard(): # type: () -> list[str] | None
     # Git revision coupled with the putative date or actual
     # finish date for the revision.  Let's fish the latest
     # revision for each subnet, and get that.
-    return list(set([
-        [revision for unused_date, revision in sorted(datestring_revision_tuple)][-1]
-        for datestring_revision_tuple in subnet_to_revision.values()
-    ]))
+    return list(
+        set(
+            [
+                [revision for unused_date, revision in sorted(datestring_revision_tuple)][-1]
+                for datestring_revision_tuple in subnet_to_revision.values()
+            ]
+        )
+    )
 
-def fetch_versions_from_public_dashboard(): # type: () -> list[str] | None
+
+def fetch_versions_from_public_dashboard():  # type: () -> list[str] | None
     """
     Fetch data from public dashboard
 
@@ -165,7 +185,7 @@ def fetch_versions_from_public_dashboard(): # type: () -> list[str] | None
             latest_replica_version = list(
                 sorted(
                     [r for r in subnet["replica_versions"] if r.get("executed_timestamp_seconds")],
-                    key=lambda rr: rr.get("executed_timestamp_seconds") or 0 # the or 0 to satisfy py3.8 typechecking
+                    key=lambda rr: rr.get("executed_timestamp_seconds") or 0,  # the or 0 to satisfy py3.8 typechecking
                 )
             )[-1]
             versions.add(latest_replica_version["replica_version_id"])
@@ -174,6 +194,7 @@ def fetch_versions_from_public_dashboard(): # type: () -> list[str] | None
 
     return list(versions)
 
+
 def main():
     unique_versions = fetch_versions_from_rollout_dashboard()
     if not unique_versions:
@@ -182,12 +203,13 @@ def main():
 
     if not unique_versions:
         # At this moment if we don't have any starting version we cannot proceed
-        raise RuntimeError(f"Didn't find any versions from:\n\t1. {ROLLOUT_DASHBOARD_ENDPOINT}\n\t2. {PUBLIC_DASHBOARD_ENDPOINT}")
+        raise RuntimeError(
+            f"Didn't find any versions from:\n\t1. {ROLLOUT_DASHBOARD_ENDPOINT}\n\t2. {PUBLIC_DASHBOARD_ENDPOINT}"
+        )
 
     eprint(f"Will qualify, starting from versions: {json.dumps(unique_versions)}")
-    matrix = {
-        "version": unique_versions
-    }
+    matrix = {"version": unique_versions}
     print(json.dumps(matrix))
 
+
 if __name__ == "__main__":
     main()
diff --git a/.github/scripts/generate-ci.py b/.github/scripts/generate-ci.py
index 40c94d07144..a517f697c2b 100644
--- a/.github/scripts/generate-ci.py
+++ b/.github/scripts/generate-ci.py
@@ -34,7 +34,7 @@ def main():
     for file in os.listdir(workflows_source):
         if file.endswith(".yaml") or file.endswith(".yml"):
             input_file = workflows_source / file
-            output_file = workflows_output /file
+            output_file = workflows_output / file
 
             transform_yaml(input_file, output_file)
 
diff --git a/bazel/candid_integration_tests/candid_integration_tests.py b/bazel/candid_integration_tests/candid_integration_tests.py
index c6ae87b4801..2a382f0bed2 100644
--- a/bazel/candid_integration_tests/candid_integration_tests.py
+++ b/bazel/candid_integration_tests/candid_integration_tests.py
@@ -27,7 +27,7 @@ def modify_file_contents(path, find, replacement):
         f.write(new_contents)
 
 
-def run_example_did_git_test(test_bin = "TEST_BIN"):
+def run_example_did_git_test(test_bin="TEST_BIN"):
     return subprocess.run(
         [os.environ[test_bin]],
         env={
@@ -71,7 +71,7 @@ def test_remove_variants_check_fails():
 
 
 def test_adding_new_did_file_succeeds():
-    res = run_example_did_git_test(test_bin = "NEW_DID_TEST")
+    res = run_example_did_git_test(test_bin="NEW_DID_TEST")
 
     message = "is a new file, skipping backwards compatibility check"
     assert message in res.stdout.decode("utf-8")
@@ -95,7 +95,7 @@ def test_remove_required_field_from_input_check_fails():
         replacement="// Blank.",
     )
 
-    res = run_example_did_git_test(test_bin = "TEST_BIN_ALSO_REVERSE")
+    res = run_example_did_git_test(test_bin="TEST_BIN_ALSO_REVERSE")
(DanceResponse/1)" assert error_message in res.stderr.decode("utf-8") @@ -110,7 +110,7 @@ def test_remove_required_field_from_output_check_fails(): replacement="// Blank.", ) - res = run_example_did_git_test(test_bin = "TEST_BIN_ALSO_REVERSE") + res = run_example_did_git_test(test_bin="TEST_BIN_ALSO_REVERSE") error_message = "Method dance: func (DanceRequest) -> (DanceResponse) is not a subtype of func (DanceRequest/1) -> (DanceResponse/1)" assert error_message in res.stderr.decode("utf-8") @@ -124,7 +124,7 @@ def test_adding_a_required_field_to_input_check_fails(): replacement="new_required_int : int;", ) - res = run_example_did_git_test(test_bin = "TEST_BIN_ALSO_REVERSE") + res = run_example_did_git_test(test_bin="TEST_BIN_ALSO_REVERSE") error_message = "Method dance: func (DanceRequest) -> (DanceResponse) is not a subtype of func (DanceRequest/1) -> (DanceResponse/1)" assert error_message in res.stderr.decode("utf-8") @@ -138,7 +138,7 @@ def test_adding_optional_field_succeeds(): replacement="new_optional_int : opt int;", ) - res = run_example_did_git_test(test_bin = "TEST_BIN_ALSO_REVERSE") + res = run_example_did_git_test(test_bin="TEST_BIN_ALSO_REVERSE") message = "bazel/candid_integration_tests/example.did passed candid checks" assert message in res.stdout.decode("utf-8") @@ -152,7 +152,7 @@ def test_adding_optional_field_reverse_succeeds(): replacement="new_optional_int : opt int;", ) - res = run_example_did_git_test(test_bin = "TEST_BIN_ALSO_REVERSE") + res = run_example_did_git_test(test_bin="TEST_BIN_ALSO_REVERSE") message = "bazel/candid_integration_tests/example.did passed candid checks" assert message in res.stdout.decode("utf-8") @@ -162,19 +162,16 @@ def test_adding_optional_field_reverse_succeeds(): def test_override_didc_checks_failing_check_succeeds(): modify_file_contents(path=did_file_path, find="happy; sad", replacement="happy") - res = run_example_did_git_test(test_bin = "TEST_BIN_ALSO_REVERSE") + res = run_example_did_git_test(test_bin="TEST_BIN_ALSO_REVERSE") error_message = "Method do_stuff: func (Request) -> () is not a subtype of func (Request/1) -> ()" assert error_message in res.stderr.decode("utf-8") assert res.returncode == 101 with mock.patch.dict(os.environ, {"OVERRIDE_DIDC_CHECK": "true"}): - res = run_example_did_git_test(test_bin = "TEST_BIN_ALSO_REVERSE") + res = run_example_did_git_test(test_bin="TEST_BIN_ALSO_REVERSE") assert res.returncode == 0 - assert ( - "Override didc check requested. Skipping didc_check." - in res.stdout.decode("utf-8") - ) + assert "Override didc check requested. Skipping didc_check." in res.stdout.decode("utf-8") if __name__ == "__main__": diff --git a/bazel/inject_version_into_wasm_tests/inspect_stamped_trivial_wasm.py b/bazel/inject_version_into_wasm_tests/inspect_stamped_trivial_wasm.py index 63cbe33356b..207e2bc74e3 100644 --- a/bazel/inject_version_into_wasm_tests/inspect_stamped_trivial_wasm.py +++ b/bazel/inject_version_into_wasm_tests/inspect_stamped_trivial_wasm.py @@ -12,6 +12,7 @@ stripped from both ends) contains the value that's supposed to be associated with the aforementioned WASM custom section. 
""" + import os import subprocess import unittest diff --git a/ci/src/git_changes/git_changes.py b/ci/src/git_changes/git_changes.py index 48e3a287cef..1b9f6644e07 100755 --- a/ci/src/git_changes/git_changes.py +++ b/ci/src/git_changes/git_changes.py @@ -10,6 +10,7 @@ Find changed files in the `rs` directory: python git_changes.py --changed-files-in-dirs rs """ + import argparse import functools import logging diff --git a/ci/src/git_changes/git_changes_test.py b/ci/src/git_changes/git_changes_test.py index f91ff0efce1..071f83f0caf 100644 --- a/ci/src/git_changes/git_changes_test.py +++ b/ci/src/git_changes/git_changes_test.py @@ -12,6 +12,7 @@ pytest """ + import os import shutil @@ -68,6 +69,7 @@ def setup_repo(tmpdir, testcase, branch="feature_branch"): if "CI_COMMIT_REF_NAME" in os.environ: del os.environ["CI_COMMIT_REF_NAME"] + @pytest.mark.fails_on_merge_train def test_change_one_file(tmpdir): """Tests that a commit has changed one crate.""" diff --git a/ic-os/components/conformance_tests/check_file_references.py b/ic-os/components/conformance_tests/check_file_references.py index cd91b8723e7..29947eb2e5d 100755 --- a/ic-os/components/conformance_tests/check_file_references.py +++ b/ic-os/components/conformance_tests/check_file_references.py @@ -43,14 +43,15 @@ def check_paths_in_source(source: str, partition_img_path: str) -> [str]: return [f"File '{source}' is not a valid file."] errors = [] - allowlisted_unavailable_dependencies = ALLOWED_UNDECLARED_DEPENDENCIES.get( - source, {}) + allowlisted_unavailable_dependencies = ALLOWED_UNDECLARED_DEPENDENCIES.get(source, {}) source_content = pathlib.Path(source).read_text() for dependency in allowlisted_unavailable_dependencies: if dependency not in source_content: - errors.append(f"{dependency} is in the allowlist of {source} even " - f"though {source} does not contain any references to " - f"it. Remove it from ALLOWED_UNDECLARED_DEPENDENCIES") + errors.append( + f"{dependency} is in the allowlist of {source} even " + f"though {source} does not contain any references to " + f"it. Remove it from ALLOWED_UNDECLARED_DEPENDENCIES" + ) for line_number, line in enumerate(source_content.splitlines(), start=1): dependencies = re.findall(COMPONENT_FILE_PATTERN, line) @@ -67,7 +68,8 @@ def check_paths_in_source(source: str, partition_img_path: str) -> [str]: errors.append( f"File '{source}' contains reference to " f"unavailable file '{dependency}' on line {line_number}. " - f"Ensure that {dependency} is added to the image.") + f"Ensure that {dependency} is added to the image." + ) print() return errors @@ -78,43 +80,36 @@ def exists(path, partition_img_path): `path`. 
""" debugfs_output = subprocess.run( - ["/usr/sbin/debugfs", "-R", f"testi {path}", partition_img_path], - check=True, - capture_output=True).stdout + ["/usr/sbin/debugfs", "-R", f"testi {path}", partition_img_path], check=True, capture_output=True + ).stdout return b"marked in use" in debugfs_output def main(): - parser = argparse.ArgumentParser( - description="Check files for allowed sources") - parser.add_argument('-f', '--files', required=True, - help="Comma-separated list of files to check") - parser.add_argument('-i', '--image', required=True, - help="Path to partition image") + parser = argparse.ArgumentParser(description="Check files for allowed sources") + parser.add_argument("-f", "--files", required=True, help="Comma-separated list of files to check") + parser.add_argument("-i", "--image", required=True, help="Path to partition image") args = parser.parse_args() - files = args.files.split(',') + files = args.files.split(",") tmpdir = tempfile.mkdtemp(prefix="icosbuild") atexit.register(lambda: subprocess.run(["rm", "-rf", tmpdir], check=True)) partition_tar_path = os.path.join(tmpdir, "partition.tar") - subprocess.run(["zstd", "-q", "--threads=0", "-f", "-d", args.image, "-o", - partition_tar_path], check=True) + subprocess.run(["zstd", "-q", "--threads=0", "-f", "-d", args.image, "-o", partition_tar_path], check=True) with tarfile.open(partition_tar_path) as tar: - partition_img = next( - (item for item in tar if item.path == "partition.img"), None) + partition_img = next((item for item in tar if item.path == "partition.img"), None) if not partition_img: return "partition.img not found in input image" tar.extract(partition_img, path=tmpdir) partition_img_path = os.path.join(tmpdir, "partition.img") - errors = list(itertools.chain.from_iterable( - check_paths_in_source(source, partition_img_path) for source in files)) + errors = list(itertools.chain.from_iterable(check_paths_in_source(source, partition_img_path) for source in files)) if errors: return "\nThe following problems were found:\n" + "\n".join(errors) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/ic-os/components/upgrade/shared-resources/monitor-expand-shared-data/monitor-expand-shared-data.py b/ic-os/components/upgrade/shared-resources/monitor-expand-shared-data/monitor-expand-shared-data.py index cdf51cb37f6..a1cf7c907ac 100644 --- a/ic-os/components/upgrade/shared-resources/monitor-expand-shared-data/monitor-expand-shared-data.py +++ b/ic-os/components/upgrade/shared-resources/monitor-expand-shared-data/monitor-expand-shared-data.py @@ -10,6 +10,7 @@ xfs and ext4 filesystems are supported by this script (even though only xfs is used at present). """ + import json import os import subprocess diff --git a/ic-os/dev-tools/bare_metal_deployment/deploy.py b/ic-os/dev-tools/bare_metal_deployment/deploy.py index 8710f57dd00..dbbbb5a2a78 100755 --- a/ic-os/dev-tools/bare_metal_deployment/deploy.py +++ b/ic-os/dev-tools/bare_metal_deployment/deploy.py @@ -120,7 +120,9 @@ class Args: benchmark_runner_script: Optional[str] = "./benchmark_runner.sh" # Paths to any benchmark tool scripts. 
-    benchmark_tools: Optional[List[str]] = field(default_factory=lambda: ["../hw_validation/stress.sh", "../hw_validation/benchmark.sh"])
+    benchmark_tools: Optional[List[str]] = field(
+        default_factory=lambda: ["../hw_validation/stress.sh", "../hw_validation/benchmark.sh"]
+    )
 
     def __post_init__(self):
         assert self.upload_img is None or self.upload_img.endswith(
@@ -133,21 +135,21 @@ def __post_init__(self):
         ), f"csv file must be specified via CLI or environment variable {BMC_INFO_ENV_VAR}"
         self.csv_filename = self.csv_filename or csv_filename_env_var
 
-        assert (self.inject_image_ipv6_prefix and self.inject_image_ipv6_gateway) or \
-            not (self.inject_image_ipv6_prefix and self.inject_image_ipv6_gateway), \
-            "Both ipv6_prefix and ipv6_gateway flags must be present or none"
+        assert (self.inject_image_ipv6_prefix and self.inject_image_ipv6_gateway) or not (
+            self.inject_image_ipv6_prefix and self.inject_image_ipv6_gateway
+        ), "Both ipv6_prefix and ipv6_gateway flags must be present or none"
 
         if self.inject_image_ipv6_prefix:
-            assert self.inject_configuration_tool, \
-                "setupos_inject_configuration tool required to modify image"
-            ipv4_args = [self.inject_image_ipv4_address,
-                         self.inject_image_ipv4_gateway,
-                         self.inject_image_ipv4_prefix_length,
-                         self.inject_image_domain]
-            assert all(ipv4_args) or not any(ipv4_args), \
-                "All ipv4 flags must be present or none"
-        assert self.file_share_ssh_key is None \
-            or Path(self.file_share_ssh_key).exists(), \
-            "File share ssh key path does not exist"
+            assert self.inject_configuration_tool, "setupos_inject_configuration tool required to modify image"
+            ipv4_args = [
+                self.inject_image_ipv4_address,
+                self.inject_image_ipv4_gateway,
+                self.inject_image_ipv4_prefix_length,
+                self.inject_image_domain,
+            ]
+            assert all(ipv4_args) or not any(ipv4_args), "All ipv4 flags must be present or none"
+        assert (
+            self.file_share_ssh_key is None or Path(self.file_share_ssh_key).exists()
+        ), "File share ssh key path does not exist"
 
 
 @dataclass(frozen=True)
@@ -216,7 +218,7 @@ def parse_from_rows(rows: List[List[str]], network_image_url: str) -> List["BMCInfo"]:
 
 def parse_from_csv_file(csv_filename: str, network_image_url: str) -> List["BMCInfo"]:
     with open(csv_filename, "r") as csv_file:
-        rows = [line.strip().split(',') for line in csv_file]
+        rows = [line.strip().split(",") for line in csv_file]
         return [parse_from_row(row, network_image_url) for row in rows]
 
 
@@ -224,8 +226,7 @@ def assert_ssh_connectivity(target_url: str, ssh_key_file: Optional[Path]):
     ssh_key_arg = f"-i {ssh_key_file}" if ssh_key_file else ""
     ssh_opts = "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
     result = invoke.run(f"ssh {ssh_opts} {ssh_key_arg} {target_url} 'echo Testing connection'", warn=True)
-    assert result and result.ok, \
-        f"SSH connection test failed: {result.stderr.strip()}"
+    assert result and result.ok, f"SSH connection test failed: {result.stderr.strip()}"
 
 
 def get_url_content(url: str, timeout_secs: int = 1) -> Optional[str]:
@@ -261,9 +262,7 @@ def check_guestos_metrics_version(ip_address: IPv6Address, timeout_secs: int) -> bool:
     log.info("Got metrics result from GuestOS")
     guestos_version_line = next(
-        line
-        for line in metrics_output.splitlines()
-        if not line.startswith("#") and "guestos_version{" in line
+        line for line in metrics_output.splitlines() if not line.startswith("#") and "guestos_version{" in line
     )
     log.info(f"GuestOS version metric: {guestos_version_line}")
     return True
@@ -275,12 +274,14 @@ def wait(wait_secs: int) -> bool:
 def check_idrac_version(bmc_info: BMCInfo):
-    response = requests.get(f"https://{bmc_info.ip_address}/redfish/v1/Managers/iDRAC.Embedded.1?$select=FirmwareVersion",
-                            verify=False,
-                            auth=(bmc_info.username,bmc_info.password))
+    response = requests.get(
+        f"https://{bmc_info.ip_address}/redfish/v1/Managers/iDRAC.Embedded.1?$select=FirmwareVersion",
+        verify=False,
+        auth=(bmc_info.username, bmc_info.password),
+    )
     data = response.json()
     assert response.status_code == 200, "ERROR - Cannot get idrac version"
-    idrac_version = int(data["FirmwareVersion"].replace('.',''))
+    idrac_version = int(data["FirmwareVersion"].replace(".", ""))
     assert idrac_version >= 6000000, "ERROR - Old idrac version detected. Please update idrac version to >= 6"
     # todo - return or raise an error.
@@ -292,15 +293,10 @@ class DeploymentError(Exception):
 
 def gen_failure(result: invoke.Result, bmc_info: BMCInfo) -> DeploymentError:
     error_msg = f"Failed on {result.command}: {result.stderr}"
-    return DeploymentError(
-        OperationResult(bmc_info, success=False, error_msg=error_msg)
-    )
+    return DeploymentError(OperationResult(bmc_info, success=False, error_msg=error_msg))
 
 
-def run_script(idrac_script_dir: Path,
-               bmc_info: BMCInfo,
-               script_and_args: str,
-               permissive: bool = True) -> None:
+def run_script(idrac_script_dir: Path, bmc_info: BMCInfo, script_and_args: str, permissive: bool = True) -> None:
     """Run a given script from the given bin dir and raise an exception if anything went wrong"""
     command = f"python3 {idrac_script_dir}/{script_and_args}"
     result = invoke.run(command)
@@ -336,9 +332,7 @@ def deploy_server(bmc_info: BMCInfo, wait_time_mins: int, idrac_script_dir: Path):
     check_idrac_version(bmc_info)
     configure_process_local_log(f"{bmc_info.ip_address}")
-    cli_creds = (
-        f"-ip {bmc_info.ip_address} -u {bmc_info.username} -p {bmc_info.password}"
-    )
+    cli_creds = f"-ip {bmc_info.ip_address} -u {bmc_info.username} -p {bmc_info.password}"
 
     # Keep state of if we attached the image
     network_image_attached = False
@@ -364,14 +358,16 @@ def deploy_server(bmc_info: BMCInfo, wait_time_mins: int, idrac_script_dir: Path):
         )
         log.info("Attaching virtual media")
         run_func(
-            f"InsertEjectVirtualMediaREDFISH.py {cli_creds} --uripath {bmc_info.network_image_url} --action insert --index 1", permissive=False,
+            f"InsertEjectVirtualMediaREDFISH.py {cli_creds} --uripath {bmc_info.network_image_url} --action insert --index 1",
+            permissive=False,
         )
         network_image_attached = True
 
         log.info("Setting next boot device to virtual floppy, and restarting")
         run_func(
-            f"SetNextOneTimeBootVirtualMediaDeviceOemREDFISH.py {cli_creds} --device 2", permissive=False,
-        ) # Device 2 for virtual Floppy
+            f"SetNextOneTimeBootVirtualMediaDeviceOemREDFISH.py {cli_creds} --device 2",
+            permissive=False,
+        )  # Device 2 for virtual Floppy
 
         log.info("Turning on machine")
         run_func(
@@ -390,18 +386,12 @@ def check_connectivity_func() -> bool:
             assert bmc_info.guestos_ipv6_address is not None, "Logic error"
             return check_guestos_ping_connectivity(
                 bmc_info.guestos_ipv6_address, timeout_secs
-            ) and check_guestos_metrics_version(
-                bmc_info.guestos_ipv6_address, timeout_secs
-            )
+            ) and check_guestos_metrics_version(bmc_info.guestos_ipv6_address, timeout_secs)
 
-        iterate_func = (
-            check_connectivity_func if bmc_info.guestos_ipv6_address else wait_func
-        )
+        iterate_func = check_connectivity_func if bmc_info.guestos_ipv6_address else wait_func
 
-        log.info(
-            f"Machine booting. Checking on SetupOS completion periodically. Timeout (mins): {wait_time_mins}"
-        )
-        for i in tqdm.tqdm(range(int(60 * (wait_time_mins / timeout_secs))),disable=DISABLE_PROGRESS_BAR):
+        log.info(f"Machine booting. Checking on SetupOS completion periodically. Timeout (mins): {wait_time_mins}")
+        for i in tqdm.tqdm(range(int(60 * (wait_time_mins / timeout_secs))), disable=DISABLE_PROGRESS_BAR):
             if iterate_func():
                 log.info("*** Deployment SUCCESS!")
                 return OperationResult(bmc_info, success=True)
@@ -421,9 +411,7 @@ def check_connectivity_func() -> bool:
     finally:
         if network_image_attached:
             try:
-                log.info(
-                    "Ejecting the attached image so the next machine can boot from it"
-                )
+                log.info("Ejecting the attached image so the next machine can boot from it")
                 run_func(
                     f"InsertEjectVirtualMediaREDFISH.py {cli_creds} --action eject --index 1",
                 )
@@ -432,14 +420,10 @@ def check_connectivity_func() -> bool:
         return e.args[0]
 
 
-def boot_images(bmc_infos: List[BMCInfo],
-                parallelism: int,
-                wait_time_mins: int,
-                idrac_script_dir: Path):
+def boot_images(bmc_infos: List[BMCInfo], parallelism: int, wait_time_mins: int, idrac_script_dir: Path):
     results: List[OperationResult] = []
 
-    arg_tuples = ((bmc_info, wait_time_mins, idrac_script_dir) \
-                  for bmc_info in bmc_infos)
+    arg_tuples = ((bmc_info, wait_time_mins, idrac_script_dir) for bmc_info in bmc_infos)
 
     with Pool(parallelism) as p:
         results = p.starmap(deploy_server, arg_tuples)
@@ -459,7 +443,13 @@ def boot_images(bmc_infos: List[BMCInfo],
     return True
 
 
-def benchmark_node(bmc_info: BMCInfo, benchmark_driver_script: str, benchmark_runner_script: str, benchmark_tools: List[str], file_share_ssh_key: Optional[str] = None):
+def benchmark_node(
+    bmc_info: BMCInfo,
+    benchmark_driver_script: str,
+    benchmark_runner_script: str,
+    benchmark_tools: List[str],
+    file_share_ssh_key: Optional[str] = None,
+):
     log.info("Benchmarking machine.")
     ip_address = bmc_info.guestos_ipv6_address
 
@@ -467,20 +457,27 @@ def benchmark_node(bmc_info: BMCInfo, benchmark_driver_script: str, benchmark_runner_script: str, benchmark_tools: List[str], file_share_ssh_key: Optional[str] = None):
     benchmark_tools = " ".join(benchmark_tools) if benchmark_tools is not None else ""
 
     # Throw away the result, for now
-    invoke.run(f"{benchmark_driver_script} {benchmark_runner_script} {file_share_ssh_key} {ip_address} {benchmark_tools}", warn=True)
+    invoke.run(
+        f"{benchmark_driver_script} {benchmark_runner_script} {file_share_ssh_key} {ip_address} {benchmark_tools}",
+        warn=True,
+    )
 
     return OperationResult(bmc_info, success=True)
 
 
-def benchmark_nodes(bmc_infos: List[BMCInfo],
-                    parallelism: int,
-                    benchmark_driver_script: str,
-                    benchmark_runner_script: str,
-                    benchmark_tools: List[str],
-                    file_share_ssh_key: Optional[str] = None):
+def benchmark_nodes(
+    bmc_infos: List[BMCInfo],
+    parallelism: int,
+    benchmark_driver_script: str,
+    benchmark_runner_script: str,
+    benchmark_tools: List[str],
+    file_share_ssh_key: Optional[str] = None,
+):
     results: List[OperationResult] = []
 
-    arg_tuples = ((bmc_info, benchmark_driver_script, benchmark_runner_script, benchmark_tools, file_share_ssh_key) \
-                  for bmc_info in bmc_infos)
+    arg_tuples = (
+        (bmc_info, benchmark_driver_script, benchmark_runner_script, benchmark_tools, file_share_ssh_key)
+        for bmc_info in bmc_infos
+    )
 
     with Pool(parallelism) as p:
         results = p.starmap(benchmark_node, arg_tuples)
@@ -500,11 +497,8 @@ def benchmark_nodes(bmc_infos: List[BMCInfo],
     return True
 
 
-def create_file_share_endpoint(file_share_url: str,
-                               file_share_username: Optional[str]) -> str:
-    return file_share_url \
-        if file_share_username is None \
-        else f"{file_share_username}@{file_share_url}"
+def create_file_share_endpoint(file_share_url: str, file_share_username: Optional[str]) -> str:
+    return file_share_url if file_share_username is None else f"{file_share_username}@{file_share_url}"
 
 
 def upload_to_file_share(
@@ -517,24 +511,24 @@ def upload_to_file_share(
     log.info(f'''Uploading "{upload_img}" to "{file_share_endpoint}"''')
 
     connect_kw_args = {"key_filename": file_share_ssh_key} if file_share_ssh_key else None
-    conn = fabric.Connection(host=file_share_endpoint,
-                             connect_kwargs=connect_kw_args)
+    conn = fabric.Connection(host=file_share_endpoint, connect_kwargs=connect_kw_args)
     tmp_dir = None
     try:
         result = conn.run("mktemp --directory", hide="both", echo=True)
         tmp_dir = str.strip(result.stdout)
         # scp is faster than fabric's built-in transfer.
         ssh_key_arg = f"-i {file_share_ssh_key}" if file_share_ssh_key else ""
-        invoke.run(f"scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {ssh_key_arg} {upload_img} {file_share_endpoint}:{tmp_dir}", echo=True, pty=True)
+        invoke.run(
+            f"scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {ssh_key_arg} {upload_img} {file_share_endpoint}:{tmp_dir}",
+            echo=True,
+            pty=True,
+        )
 
         upload_img_filename = upload_img.name
         # Decompress in place. disk.img should appear in the same directory
         conn.run(f"tar --extract --zstd --file {tmp_dir}/{upload_img_filename} --directory {tmp_dir}", echo=True)
         image_destination = f"/{file_share_dir}/{file_share_image_name}"
-        conn.run(
-            f"mv {tmp_dir}/disk.img {image_destination}",
-            echo=True
-        )
+        conn.run(f"mv {tmp_dir}/disk.img {image_destination}", echo=True)
         conn.run(f"chmod a+r {image_destination}", echo=True)
     finally:
         # Clean up remote dir
@@ -544,14 +538,16 @@ def upload_to_file_share(
     log.info(f"Image ready at {file_share_endpoint}:/{file_share_dir}/{file_share_image_name}")
 
 
-def inject_config_into_image(setupos_inject_configuration_path: Path,
-                             working_dir: Path,
-                             compressed_image_path: Path,
-                             ipv6_prefix: str,
-                             ipv6_gateway: str,
-                             ipv4_args: Optional[Ipv4Args],
-                             verbose: Optional[str],
-                             pub_key: Optional[str]) -> Path:
+def inject_config_into_image(
+    setupos_inject_configuration_path: Path,
+    working_dir: Path,
+    compressed_image_path: Path,
+    ipv6_prefix: str,
+    ipv6_gateway: str,
+    ipv4_args: Optional[Ipv4Args],
+    verbose: Optional[str],
+    pub_key: Optional[str],
+) -> Path:
     """
     Transform the compressed image.
 
     * Decompress image into working_dir
@@ -562,10 +558,11 @@ def inject_config_into_image(setupos_inject_configuration_path: Path,
     """
     assert working_dir.is_dir()
     assert compressed_image_path.exists()
+
     def is_executable(p: Path) -> bool:
         return os.access(p, os.X_OK)
-    assert setupos_inject_configuration_path.exists() and \
-        is_executable(setupos_inject_configuration_path)
+
+    assert setupos_inject_configuration_path.exists() and is_executable(setupos_inject_configuration_path)
 
     invoke.run(f"tar --extract --zstd --file {compressed_image_path} --directory {working_dir}", echo=True)
 
@@ -588,9 +585,12 @@ def is_executable(p: Path) -> bool:
 
     admin_key_part = ""
     if pub_key:
-        admin_key_part = f"--public-keys \"{pub_key}\""
+        admin_key_part = f'--public-keys "{pub_key}"'
 
-    invoke.run(f"{setupos_inject_configuration_path} {image_part} {prefix_part} {gateway_part} {ipv4_part} {verbose_part} {admin_key_part}", echo=True)
+    invoke.run(
+        f"{setupos_inject_configuration_path} {image_part} {prefix_part} {gateway_part} {ipv4_part} {verbose_part} {admin_key_part}",
+        echo=True,
+    )
 
     # Reuse the name of the compressed image path in the working directory
     result_filename = compressed_image_path.name
@@ -602,13 +602,11 @@ def is_executable(p: Path) -> bool:
 
 def main():
     print(sys.argv)
-    args: Args = parse(Args, add_config_path_arg=True) # Parse from config file too
+    args: Args = parse(Args, add_config_path_arg=True)  # Parse from config file too
 
-    DISABLE_PROGRESS_BAR = args.ci_mode # noqa - ruff format wants to erroneously delete this
+    DISABLE_PROGRESS_BAR = args.ci_mode  # noqa - ruff format wants to erroneously delete this
 
-    network_image_url: str = (
-        f"http://{args.file_share_url}/{args.file_share_image_filename}"
-    )
+    network_image_url: str = f"http://{args.file_share_url}/{args.file_share_image_filename}"
     log.info(f"Using network_image_url: {network_image_url}")
 
     idrac_script_dir = Path(args.idrac_script).parent if args.idrac_script else Path(DEFAULT_IDRAC_SCRIPT_DIR)
@@ -619,10 +617,12 @@ def main():
 
     ipv4_args = None
     if args.inject_image_ipv4_address:
-        ipv4_args = Ipv4Args(args.inject_image_ipv4_address,
-                             args.inject_image_ipv4_gateway,
-                             args.inject_image_ipv4_prefix_length,
-                             args.inject_image_domain)
+        ipv4_args = Ipv4Args(
+            args.inject_image_ipv4_address,
+            args.inject_image_ipv4_gateway,
+            args.inject_image_ipv4_prefix_length,
+            args.inject_image_domain,
+        )
 
     # Benchmark these nodes, rather than deploy them.
     if args.benchmark:
@@ -656,15 +656,16 @@ def main():
             args.inject_image_ipv6_gateway,
             ipv4_args,
             args.inject_image_verbose,
-            args.inject_image_pub_key
-            )
+            args.inject_image_pub_key,
+        )
 
         upload_to_file_share(
             modified_image_path,
             file_share_endpoint,
             args.file_share_dir,
             args.file_share_image_filename,
-            args.file_share_ssh_key)
+            args.file_share_ssh_key,
+        )
 
     elif args.upload_img:
         upload_to_file_share(
@@ -672,15 +673,13 @@ def main():
             file_share_endpoint,
             args.file_share_dir,
             args.file_share_image_filename,
-            args.file_share_ssh_key)
+            args.file_share_ssh_key,
+        )
 
     wait_time_mins = args.wait_time
     parallelism = args.parallel
     success = boot_images(
-        bmc_infos=bmc_infos,
-        parallelism=parallelism,
-        wait_time_mins=wait_time_mins,
-        idrac_script_dir=idrac_script_dir
+        bmc_infos=bmc_infos, parallelism=parallelism, wait_time_mins=wait_time_mins, idrac_script_dir=idrac_script_dir
     )
 
     if not success:
diff --git a/ic-os/dev-tools/power-metric-verification/parse_voltage_readings.py b/ic-os/dev-tools/power-metric-verification/parse_voltage_readings.py
index 8c7590245a6..282ac425c0f 100644
--- a/ic-os/dev-tools/power-metric-verification/parse_voltage_readings.py
+++ b/ic-os/dev-tools/power-metric-verification/parse_voltage_readings.py
@@ -4,17 +4,17 @@
 
 voltage_readings = sys.argv[1]
 
-with open(voltage_readings, 'r') as in_file, open('voltage_readings.csv', 'w', newline='') as out_file:
+with open(voltage_readings, "r") as in_file, open("voltage_readings.csv", "w", newline="") as out_file:
     writer = csv.writer(out_file)
-    writer.writerow(['Time', 'Instantaneous voltage reading'])
+    writer.writerow(["Time", "Instantaneous voltage reading"])
 
     lines = in_file.readlines()
     for line in lines:
-        if line.startswith(' Instantaneous power reading:'):
-            voltage_reading = re.search(r'\d+', line).group()
+        if line.startswith(" Instantaneous power reading:"):
+            voltage_reading = re.search(r"\d+", line).group()
 
-        elif line.startswith(' IPMI timestamp:'):
-            time = re.search(r'\d{2}:\d{2}:\d{2}', line).group()
+        elif line.startswith(" IPMI timestamp:"):
+            time = re.search(r"\d{2}:\d{2}:\d{2}", line).group()
             writer.writerow([time, voltage_reading])
diff --git a/pre-commit/ruff-format.sh b/pre-commit/ruff-format.sh
index 08e37010d55..585dcf1de41 100755
--- a/pre-commit/ruff-format.sh
+++ b/pre-commit/ruff-format.sh
@@ -7,3 +7,4 @@ REPO_PATH="$(dirname "$(readlink "$WORKSPACE")")"
 cd "$REPO_PATH"
 
 "$RUFF_PATH" check . --fix
+"$RUFF_PATH" format .
diff --git a/pre-commit/ruff-lint.sh b/pre-commit/ruff-lint.sh
index 87bf2ce783c..b7444247fca 100755
--- a/pre-commit/ruff-lint.sh
+++ b/pre-commit/ruff-lint.sh
@@ -6,7 +6,7 @@ RUFF_PATH="$(readlink "$ruff_path")"
 REPO_PATH="$(dirname "$(readlink "$WORKSPACE")")"
 
 cd "$REPO_PATH"
-if ! "$RUFF_PATH" check . -q; then
+if ! "$RUFF_PATH" check . -q || ! "$RUFF_PATH" format . --check -q; then
     cat >&2 <<EOF
diff --git a/rs/coverage.py b/rs/coverage.py
     error_msg = error_msg.split("\n")
     if error_msg[1] == "No tests available.":
-
         return []
     assert error_msg[1] == "Available tests:"
     pkg_exes = list(map(lambda pkg_exe: pkg_exe.lstrip(" "), error_msg[2:-3]))
diff --git a/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/scripts/cost_estimator.py b/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/scripts/cost_estimator.py
index 37f97007031..9172c2a9782 100755
--- a/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/scripts/cost_estimator.py
+++ b/rs/crypto/internal/crypto_lib/threshold_sig/bls12_381/scripts/cost_estimator.py
@@ -51,32 +51,28 @@
 import operator as op
 
 
-def cost(group, op, n = 1):
-    assert(n >= 1)
+def cost(group, op, n=1):
+    assert n >= 1
 
     # all costs are in microseconds
     costs = {
-        'g1': {
-            'mul': 276,
-            'mul2': 360,
-            'hash': 110,
-            'serialize': 29,
-            'deserialize': 113,
+        "g1": {
+            "mul": 276,
+            "mul2": 360,
+            "hash": 110,
+            "serialize": 29,
+            "deserialize": 113,
         },
-        'g2': {
-            'mul': 835,
-            'serialize': 34,
-            'deserialize': 410,
+        "g2": {
+            "mul": 835,
+            "serialize": 34,
+            "deserialize": 410,
         },
-        'gt': {
-            'pair4': 2253,
-            'search16': 300,
-            'add': 5
-        }
+        "gt": {"pair4": 2253, "search16": 300, "add": 5},
     }
 
     muln_costs = {
-        'g1': {
+        "g1": {
             2: 268,
             4: 534,
             8: 1068,
@@ -90,7 +86,7 @@ def cost(group, op, n = 1):
             128: 7958,
             256: 14364,
         },
-        'g2': {
+        "g2": {
             2: 845,
             4: 1711,
             8: 3485,
@@ -103,13 +99,13 @@ def cost(group, op, n = 1):
             96: 21738,
             128: 27324,
             256: 48344,
-        }
+        },
     }
 
-    if op == 'muln_sparse':
-        return int(0.1 * cost(group, 'muln', n))
+    if op == "muln_sparse":
+        return int(0.1 * cost(group, "muln", n))
 
-    if op == 'muln':
+    if op == "muln":
         if group in muln_costs:
             avail = muln_costs[group].keys()
 
@@ -121,10 +117,11 @@ def cost(group, op, n = 1):
             return int(n * (muln_costs[group][closest] / closest))
         else:
             # just assume naive mul
-            return cost(group, 'mul', n)
+            return cost(group, "mul", n)
 
     return n * costs[group][op]
 
+
 class Time(object):
     def __init__(self, n):
         self.val = n
@@ -163,6 +160,7 @@ def __str__(self):
         hours = minutes / 60
         return "%.02f hours" % (hours)
 
+
 class Bytes(object):
     def __init__(self, n):
         if isinstance(n, int):
@@ -185,11 +183,12 @@ def __rmul__(self, o):
 
     def __str__(self):
         bytes = self.val
-        if bytes >= 1024*1024:
-            return "%.02f MiB" % (bytes/(1024*1024))
+        if bytes >= 1024 * 1024:
+            return "%.02f MiB" % (bytes / (1024 * 1024))
 
         return "%d bytes" % (bytes)
 
+
 class NidkgCosts(object):
     def __init__(self):
         self.params = {}
@@ -198,12 +197,12 @@ def set_var(self, nm, expr):
         self.params[nm] = expr
 
     def parse_vars(self, str):
-        for line in str.split('\n'):
-            if line == '' or line.startswith('#'):
+        for line in str.split("\n"):
+            if line == "" or line.startswith("#"):
                 continue
 
             try:
-                (k,v) = line.split(' = ')
+                (k, v) = line.split(" = ")
                 self.set_var(k, v)
             except ValueError:
                 print("Failed to parse '%s' as key = val" % (line))
@@ -222,55 +221,55 @@ def match_prefix(self, prefix):
 
     def eval(self, nm):
         expr = self.params[nm]
-        return self._eval(ast.parse(expr, mode='eval').body)
+        return self._eval(ast.parse(expr, mode="eval").body)
 
     def eval_all(self):
         results = []
         for nm in self.params:
             expr = self.params[nm]
-            val = self._eval(ast.parse(expr, mode='eval').body)
+            val = self._eval(ast.parse(expr, mode="eval").body)
             results.append((nm, val))
         return results
 
     def _eval(self, node):
-
-        operators = {ast.Add: op.add,
-                     ast.Sub: op.sub,
-                     ast.Mult: op.mul,
-                     ast.FloorDiv: op.floordiv,
-                     ast.Div: op.truediv,
-                     ast.Pow: op.pow,
-                     ast.USub: op.neg
+        operators = {
+            ast.Add: op.add,
+            ast.Sub: op.sub,
+            ast.Mult: op.mul,
+            ast.FloorDiv: op.floordiv,
+            ast.Div: op.truediv,
+            ast.Pow: op.pow,
+            ast.USub: op.neg,
         }
 
         if isinstance(node, ast.Num):
             return node.n
-        elif isinstance(node, ast.BinOp): #
+        elif isinstance(node, ast.BinOp):  #
             return operators[type(node.op)](self._eval(node.left), self._eval(node.right))
         elif isinstance(node, ast.Name):
             val = self.eval(node.id)
 
-            if node.id.endswith('_bytes'):
+            if node.id.endswith("_bytes"):
                 return Bytes(val)
             else:
                 return val
         elif isinstance(node, ast.Call):
             if node.func.id == "pow2":
-                assert(len(node.args) == 1)
+                assert len(node.args) == 1
                 val = self._eval(node.args[0])
                 return (1 << val) - 1
             if node.func.id == "ceil":
-                assert(len(node.args) == 1)
+                assert len(node.args) == 1
                 val = self._eval(node.args[0])
                 return math.ceil(val)
             if node.func.id == "sqrt":
-                assert(len(node.args) == 1)
+                assert len(node.args) == 1
                 val = self._eval(node.args[0])
                 return math.ceil(math.sqrt(val))
             elif node.func.id == "cost":
-                assert(len(node.args) == 2 or len(node.args) == 3)
+                assert len(node.args) == 2 or len(node.args) == 3
                 group = node.args[0].id
                 oper = node.args[1].id
 
-                n = 1 # default
+                n = 1  # default
                 if len(node.args) == 3:
                     n = self._eval(node.args[2])
 
@@ -281,6 +280,7 @@ def _eval(self, node):
         else:
             raise Exception("Bad expression")
 
+
 nidkg_expr = """
 security_level = 256
 g1_bytes = 48
@@ -364,6 +364,7 @@ def _eval(self, node):
 fs_decryption_worst_cost = fs_decryption_usual_cost + cheating_dealer_setup_cost + number_of_chunks*cheating_dealer_search_cost
 """
 
+
 class Repl(cmd.Cmd, object):
     intro = "Welcome to NIDKG cost estimator"
     prompt = "> "
@@ -377,7 +378,7 @@ def __init__(self, nidkg_expr):
     def do_eval(self, arg):
         """Evaluate an expression"""
         try:
-            for v in arg.split(' '):
+            for v in arg.split(" "):
                 for f in self.rules.match_prefix(v):
                     print("%s = %s" % (f, self.rules.eval(f)))
         except KeyError as e:
@@ -388,7 +389,7 @@ def complete_eval(self, text, line, begidx, endidx):
 
     def do_eval_all(self, arg):
         """Evaluate all stored expressions"""
-        for (key,val) in self.rules.eval_all():
+        for key, val in self.rules.eval_all():
             print("%s = %s" % (key, val))
 
     def do_set(self, arg):
@@ -400,7 +401,7 @@ def complete_set(self, text, line, begidx, endidx):
 
     def do_keys(self, arg):
         """List stored expressions (with optional prefix matching)"""
-        for v in arg.split(' '):
+        for v in arg.split(" "):
             for f in self.rules.match_prefix(v):
                 print("%s = %s" % (f, self.rules.expr(f)))
 
@@ -416,5 +417,6 @@ def do_EOF(self, arg):
         print("\nGoodbye")
         return True
 
+
 if __name__ == "__main__":
     Repl(nidkg_expr).cmdloop()
diff --git a/rs/ethereum/cketh/minter/scripts/ofaq_blocklist.py b/rs/ethereum/cketh/minter/scripts/ofaq_blocklist.py
index 0d921c71797..2ac0c647943 100644
--- a/rs/ethereum/cketh/minter/scripts/ofaq_blocklist.py
+++ b/rs/ethereum/cketh/minter/scripts/ofaq_blocklist.py
@@ -12,12 +12,12 @@
     # Filter lines containing "Digital Currency Address - ETH" and "0x"
     filtered_lines = [line for line in lines if "Digital Currency Address - ETH" and "0x" in line]
 
-    eth_address_pattern = r'0x[a-fA-F0-9]{40}'
+    eth_address_pattern = r"0x[a-fA-F0-9]{40}"
     eth_addresses = [address for address in filtered_lines if re.match(eth_address_pattern, address)]
     unique_eth_addresses = list(set(eth_addresses))
     for line in unique_eth_addresses:
-        print(line.split(';')[0])
+        print(line.split(";")[0])
 
     print("Found", len(unique_eth_addresses), "addresses in the OFAC SDN list")
 else:
Status code:", response.status_code) diff --git a/rs/tests/proptests/proptest-regression-parser.py b/rs/tests/proptests/proptest-regression-parser.py index cdac898a9a6..1242442337a 100644 --- a/rs/tests/proptests/proptest-regression-parser.py +++ b/rs/tests/proptests/proptest-regression-parser.py @@ -3,6 +3,7 @@ A collection of helper functions that parse and analyse proptest-regression files to help identify the issue with a regression. """ + import argparse import re @@ -46,12 +47,10 @@ def shrink_decimal_array(data): def shrink_arrays(data): - chunks = re.split(r"\[|\]", data) output = "" for chunk in chunks: - if is_decimal_array(chunk): output += "[" output += shrink_decimal_array(chunk) diff --git a/rs/workload_generator/plot.py b/rs/workload_generator/plot.py index 471d22c768c..1b413c2f93b 100755 --- a/rs/workload_generator/plot.py +++ b/rs/workload_generator/plot.py @@ -18,7 +18,6 @@ def get_numbers(filename): def plot(results, title, outname): - fig, ax = plt.subplots() ax.plot([x for x, _ in results], [y for _, y in results], label=title, marker="o") diff --git a/testnet/ansible/inventory/inventory.py b/testnet/ansible/inventory/inventory.py index f79e71ef4f1..ca62145e62c 100755 --- a/testnet/ansible/inventory/inventory.py +++ b/testnet/ansible/inventory/inventory.py @@ -214,7 +214,7 @@ def _host_patch_vars(self, host): # That didn't work, try to build IPv6 from the MAC address if ic_host: ipv6_prefix = self._get_ipv6_prefix_for_ic_host(ic_host) - ipv6_subnet = '/64' + ipv6_subnet = "/64" # For the mainnet deployments, the MAC address is calculated based on the number of guests on # the physical host, so we need to enumerate and count the guests on each physical host. # Assign a unique ID to each physical host. This will be a serial number if diff --git a/testnet/host_inventory.py b/testnet/host_inventory.py index d67de876ebd..67a82554c89 100755 --- a/testnet/host_inventory.py +++ b/testnet/host_inventory.py @@ -33,7 +33,7 @@ def main(argv): def host_files(p): - for (root, dirs, files) in walk(p): + for root, dirs, files in walk(p): for f in filter(lambda f: f.endswith(".ini"), files): yield path.join(root, f) diff --git a/testnet/tools/icos_collect_debug_info.py b/testnet/tools/icos_collect_debug_info.py index db6dd461cc1..0b748535c05 100755 --- a/testnet/tools/icos_collect_debug_info.py +++ b/testnet/tools/icos_collect_debug_info.py @@ -10,6 +10,7 @@ ${REPO_ROOT}/ci/src/artifacts/gitlab_artifacts_download.py --job-id """ + import argparse import json import logging diff --git a/toolchains/sysimage/build_container_base_image.py b/toolchains/sysimage/build_container_base_image.py index a07c95da42e..971a2fcaad4 100644 --- a/toolchains/sysimage/build_container_base_image.py +++ b/toolchains/sysimage/build_container_base_image.py @@ -54,8 +54,8 @@ def __post_init__(self): def build_image(container_cmd: str, image_tag: str, dockerfile: str, context_dir: str, build_args: List[str]): - build_arg_strings = [f"--build-arg \"{v}\"" for v in build_args] - build_arg_strings_joined = ' '.join(build_arg_strings) + build_arg_strings = [f'--build-arg "{v}"' for v in build_args] + build_arg_strings_joined = " ".join(build_arg_strings) log.info("Building image...") cmd = f"{container_cmd} build --squash-all --no-cache --tag {image_tag} {build_arg_strings_joined} --file {dockerfile} {context_dir}" @@ -67,12 +67,11 @@ def save_image(container_cmd: str, image_tag: str, output_file: str): log.info("Saving image to tar file") cmd = f"{container_cmd} image save --output {output_file} {image_tag}" 
     invoke.run(cmd)
-    invoke.run("sync") # For determinism (?)
+    invoke.run("sync")  # For determinism (?)
 
     # Using sudo w/ podman requires changing permissions on the output tar file (not the tar contents)
     output_path = Path(output_file)
-    assert path_owned_by_root(output_path), \
-        f"'{output_path}' not owned by root. Remove this and the next line."
+    assert path_owned_by_root(output_path), f"'{output_path}' not owned by root. Remove this and the next line."
     take_ownership_of_file(output_path)
 
     assert output_path.exists()
@@ -85,7 +84,14 @@ def save_image(container_cmd: str, image_tag: str, output_file: str):
 def main():
     parser = ArgumentParser()
     parser.add_arguments(Args, dest="fancy")
-    parser.add_argument("--context-file", dest="context_files", type=Path, action="append", help="Files to drop directly into the build context.", required=True)
+    parser.add_argument(
+        "--context-file",
+        dest="context_files",
+        type=Path,
+        action="append",
+        help="Files to drop directly into the build context.",
+        required=True,
+    )
     args = parser.parse_args()
     log.info(f"Using args: {args}")
 
@@ -104,7 +110,7 @@ def main():
 
     build_image(container_cmd, args.fancy.image_tag, args.fancy.dockerfile, context_dir, build_args)
     save_image(container_cmd, args.fancy.image_tag, args.fancy.output)
-    remove_image(container_cmd, args.fancy.image_tag) # No harm removing if in the tmp dir
+    remove_image(container_cmd, args.fancy.image_tag)  # No harm removing if in the tmp dir
 
 
 if __name__ == "__main__":
diff --git a/toolchains/sysimage/build_container_filesystem_tar.py b/toolchains/sysimage/build_container_filesystem_tar.py
index c03ff7d63a5..a3d744727c7 100755
--- a/toolchains/sysimage/build_container_filesystem_tar.py
+++ b/toolchains/sysimage/build_container_filesystem_tar.py
@@ -33,8 +33,10 @@ def __post_init__(self):
         assert self.image_file.exists()
 
 
-ReturnType = TypeVar('ReturnType') # https://docs.python.org/3/library/typing.html#generics
-def retry(func: Callable[[], ReturnType], num_retries: int = 3 ) -> ReturnType:
+ReturnType = TypeVar("ReturnType")  # https://docs.python.org/3/library/typing.html#generics
+
+
+def retry(func: Callable[[], ReturnType], num_retries: int = 3) -> ReturnType:
     """
     Call the given `func`. If an exception is raised, print, and retry `num_retries` times.
     Back off retries by sleeping for at least 5 secs + an exponential increase.
@@ -49,7 +51,7 @@ def retry(func: Callable[[], ReturnType], num_retries: int = 3 ) -> ReturnType:
             print(f"Retries left: {num_retries - i}", file=sys.stderr)
             wait_time_secs = BASE_BACKOFF_WAIT_SECS + i**2
             print(f"Waiting for next retry (secs): {wait_time_secs}")
-            time.sleep(wait_time_secs) # 5, 6, 9, 14, 21, etc.
+            time.sleep(wait_time_secs)  # 5, 6, 9, 14, 21, etc.
 
     # Let the final try actually throw
     return func()
@@ -75,18 +77,20 @@ def arrange_component_files(context_dir, component_files):
         shutil.copy(source_file, install_target)
 
 
-def build_container(container_cmd: str,
-                    build_args: List[str],
-                    context_dir: str,
-                    dockerfile: str,
-                    image_tag: str,
-                    no_cache: bool,
-                    base_image_override: Optional[BaseImageOverride]) -> str:
+def build_container(
+    container_cmd: str,
+    build_args: List[str],
+    context_dir: str,
+    dockerfile: str,
+    image_tag: str,
+    no_cache: bool,
+    base_image_override: Optional[BaseImageOverride],
+) -> str:
     """Run container build command with given args.
Return the given tag.""" assert image_tag and context_dir, "Arguments can not be empty" - build_arg_strings = [f"--build-arg \"{v}\"" for v in build_args] - build_arg_strings_joined = ' '.join(build_arg_strings) + build_arg_strings = [f'--build-arg "{v}"' for v in build_args] + build_arg_strings_joined = " ".join(build_arg_strings) cmd = f"{container_cmd} " cmd += "build " @@ -111,15 +115,15 @@ def build_container(container_cmd: str, # Context must go last cmd += f"{context_dir} " print(cmd) + def build_func(): - invoke.run(cmd) # Throws on failure + invoke.run(cmd) # Throws on failure + retry(build_func) return image_tag -def export_container_filesystem(container_cmd: str, - image_tag: str, - destination_tar_filename: str): +def export_container_filesystem(container_cmd: str, image_tag: str, destination_tar_filename: str): """ Export the filesystem from an image. Creates container - but does not start it, avoiding timestamp and other determinism issues. @@ -132,8 +136,9 @@ def export_container_filesystem(container_cmd: str, destination_tar_path = Path(destination_tar_filename) # Using sudo w/ podman requires changing permissions on the output tar file (not the tar contents) - assert path_owned_by_root(destination_tar_path), \ - f"'{destination_tar_path}' not owned by root. Remove this and the next line." + assert path_owned_by_root( + destination_tar_path + ), f"'{destination_tar_path}' not owned by root. Remove this and the next line." take_ownership_of_file(destination_tar_path) @@ -147,7 +152,7 @@ def resolve_file_args(context_dir: str, file_build_args: List[str]) -> List[str] path = Path(context_dir) / pathname - with open(path, 'r') as f: + with open(path, "r") as f: value = f.readline().strip() result.append(f"{name}={value}") @@ -159,31 +164,22 @@ def generate_image_tag(base: str) -> str: # See the (unwieldy) format spec: # https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pulling-manifests # Replace disallowed chars with dashes - return base.translate(str.maketrans({'/': '-', '.':'-', ':':'-'})) - - -def build_and_export(container_cmd: str, - build_args: List[str], - context_dir: str, - dockerfile: str, - image_tag: str, - no_cache: bool, - base_image_override: Optional[BaseImageOverride], - destination_tar_filename: str) -> None: - - build_container(container_cmd, - build_args, - context_dir, - dockerfile, - image_tag, - no_cache, - base_image_override) + return base.translate(str.maketrans({"/": "-", ".": "-", ":": "-"})) - export_container_filesystem(container_cmd, - image_tag, - destination_tar_filename) +def build_and_export( + container_cmd: str, + build_args: List[str], + context_dir: str, + dockerfile: str, + image_tag: str, + no_cache: bool, + base_image_override: Optional[BaseImageOverride], + destination_tar_filename: str, +) -> None: + build_container(container_cmd, build_args, context_dir, dockerfile, image_tag, no_cache, base_image_override) + export_container_filesystem(container_cmd, image_tag, destination_tar_filename) def get_args(): @@ -228,7 +224,7 @@ def get_args(): type=str, action="append", help="Files to drop directly into the build context.", - required=True + required=True, ) parser.add_argument( @@ -237,14 +233,14 @@ def get_args(): type=str, action="append", help="Files to include in rootfs; expects list of sourcefile:targetfile", - required=True + required=True, ) parser.add_argument( "--no-cache", help="By default the container builds using the image layer cache. Turn this on to prevent using the cache. 
        help="By default the container builds using the image layer cache. Turn this on to prevent using the cache. Cache usage causes instability with parallel builds. It can be mitigated by addressing at a layer above this.",
         default=False,
-        action="store_true"
+        action="store_true",
     )
 
     parser.add_argument(
@@ -257,14 +253,15 @@ def get_args():
         "--tmpfs-container-sys-dir",
         help="Create and mount a tmpfs to store its system files. It will be unmounted before exiting.",
         default=False,
-        action="store_true"
+        action="store_true",
     )
 
     parser.add_argument(
         "--base-image-tar-file",
         help="Override the base image used by 'podman build'. The 'FROM' line in the target Dockerfile will be ignored",
         default=None,
-        type=str)
+        type=str,
+    )
 
     # Need the image tag to identify the right image.
     # `podman load` puts the image into the local image registry directly and labels it with the image tag used during `podman build`
@@ -272,7 +269,8 @@ def get_args():
         "--base-image-tar-file-tag",
         help="Tag given to the container image during 'podman build'. Required if --base-image-tar-file is used.",
         default=None,
-        type=str)
+        type=str,
+    )
 
     return parser.parse_args()
 
@@ -307,26 +305,29 @@ def main():
     build_args.extend(resolved_file_args)
 
     # Override the base image with a local tar file?
-    def only_one_defined(a,b) -> bool:
+    def only_one_defined(a, b) -> bool:
         return (a and not b) or (b and not a)
-    assert not only_one_defined(args.base_image_tar_file, args.base_image_tar_file_tag), \
-        "Please specify BOTH --base-image-tar-file* flags"
+
+    assert not only_one_defined(
+        args.base_image_tar_file, args.base_image_tar_file_tag
+    ), "Please specify BOTH --base-image-tar-file* flags"
 
     base_image_override = None
     if args.base_image_tar_file:
-        base_image_override = BaseImageOverride(Path(args.base_image_tar_file),
-                                                args.base_image_tar_file_tag)
+        base_image_override = BaseImageOverride(Path(args.base_image_tar_file), args.base_image_tar_file_tag)
 
     container_cmd = generate_container_command("sudo podman ", temp_sys_dir)
-    build_and_export(container_cmd,
-                     build_args,
-                     context_dir,
-                     args.dockerfile,
-                     image_tag,
-                     no_cache,
-                     base_image_override,
-                     destination_tar_filename)
-    remove_image(container_cmd, image_tag) # No harm removing if in the tmp dir
+    build_and_export(
+        container_cmd,
+        build_args,
+        context_dir,
+        args.dockerfile,
+        image_tag,
+        no_cache,
+        base_image_override,
+        destination_tar_filename,
+    )
+    remove_image(container_cmd, image_tag)  # No harm removing if in the tmp dir
 
 
 if __name__ == "__main__":
diff --git a/toolchains/sysimage/build_ext4_image.py b/toolchains/sysimage/build_ext4_image.py
index a03d0a0c489..a937b446dda 100755
--- a/toolchains/sysimage/build_ext4_image.py
+++ b/toolchains/sysimage/build_ext4_image.py
@@ -99,9 +99,10 @@ def strip_files(fs_basedir, fakeroot_statefile, strip_paths):
     for batch_start in range(0, len(flattened_paths), BATCH_SIZE):
         batch_end = min(batch_start + BATCH_SIZE, len(flattened_paths))
         subprocess.run(
-            ["fakeroot", "-s", fakeroot_statefile, "-i", fakeroot_statefile, "rm", "-rf"] +
-            flattened_paths[batch_start:batch_end],
-            check=True)
+            ["fakeroot", "-s", fakeroot_statefile, "-i", fakeroot_statefile, "rm", "-rf"]
+            + flattened_paths[batch_start:batch_end],
+            check=True,
+        )
 
 
 def prepare_tree_from_tar(in_file, fakeroot_statefile, fs_basedir, dir_to_extract):
@@ -207,27 +208,60 @@ def main():
     # ownership will be preserved while unpacking (see below).
     prepare_tree_from_tar(in_file, fakeroot_statefile, fs_basedir, limit_prefix)
     strip_files(fs_basedir, fakeroot_statefile, strip_paths)
-    subprocess.run(['sync'], check=True)
+    subprocess.run(["sync"], check=True)
 
     # Now build the basic filesystem image. Wrap again in fakeroot
     # so correct permissions are read for all files etc.
-    mke2fs_args = ["faketime", "-f", "1970-1-1 0:0:0", "/usr/sbin/mkfs.ext4", "-E", "hash_seed=c61251eb-100b-48fe-b089-57dea7368612", "-U", "clear", "-F", image_file, str(image_size)]
+    mke2fs_args = [
+        "faketime",
+        "-f",
+        "1970-1-1 0:0:0",
+        "/usr/sbin/mkfs.ext4",
+        "-E",
+        "hash_seed=c61251eb-100b-48fe-b089-57dea7368612",
+        "-U",
+        "clear",
+        "-F",
+        image_file,
+        str(image_size),
+    ]
     subprocess.run(mke2fs_args, check=True, env={"E2FSPROGS_FAKE_TIME": "0"})
 
     # Use our tool, diroid, to create an fs_config file to be used by e2fsdroid.
     # This file is a simple list of files with their desired uid, gid, and mode.
     fs_config_path = os.path.join(tmpdir, "fs_config")
-    diroid_args=[args.diroid, "--fakeroot", fakeroot_statefile, "--input-dir", os.path.join(fs_basedir, limit_prefix), "--output", fs_config_path]
+    diroid_args = [
+        args.diroid,
+        "--fakeroot",
+        fakeroot_statefile,
+        "--input-dir",
+        os.path.join(fs_basedir, limit_prefix),
+        "--output",
+        fs_config_path,
+    ]
     subprocess.run(diroid_args, check=True)
 
-    e2fsdroid_args= ["faketime", "-f", "1970-1-1 0:0:0", "fakeroot", "-i", fakeroot_statefile, "e2fsdroid", "-e", "-a", "/", "-T", "0"]
+    e2fsdroid_args = [
+        "faketime",
+        "-f",
+        "1970-1-1 0:0:0",
+        "fakeroot",
+        "-i",
+        fakeroot_statefile,
+        "e2fsdroid",
+        "-e",
+        "-a",
+        "/",
+        "-T",
+        "0",
+    ]
     e2fsdroid_args += ["-C", fs_config_path]
     if file_contexts_file:
         e2fsdroid_args += ["-S", file_contexts_file]
     e2fsdroid_args += ["-f", os.path.join(fs_basedir, limit_prefix), image_file]
     subprocess.run(e2fsdroid_args, check=True, env={"E2FSPROGS_FAKE_TIME": "0"})
-    subprocess.run(['sync'], check=True)
+    subprocess.run(["sync"], check=True)
 
     # We use our tool, dflate, to quickly create a sparse, deterministic, tar.
     # If dflate is ever misbehaving, it can be replaced with:
diff --git a/toolchains/sysimage/build_fat32_image.py b/toolchains/sysimage/build_fat32_image.py
index 937b3c510f8..b56ad007058 100755
--- a/toolchains/sysimage/build_fat32_image.py
+++ b/toolchains/sysimage/build_fat32_image.py
@@ -41,7 +41,17 @@ def untar_to_fat32(tf, fs_basedir, out_file, path_transform):
             with open(os.path.join(fs_basedir, path), "wb") as f:
                 f.write(tf.extractfile(member).read())
             subprocess.run(
-                ["faketime", "-f", "1970-1-1 0:0:0", "mcopy", "-o", "-i", out_file, os.path.join(fs_basedir, path), "::/" + path],
+                [
+                    "faketime",
+                    "-f",
+                    "1970-1-1 0:0:0",
+                    "mcopy",
+                    "-o",
+                    "-i",
+                    out_file,
+                    os.path.join(fs_basedir, path),
+                    "::/" + path,
+                ],
                 check=True,
             )
         else:
diff --git a/toolchains/sysimage/build_lvm_image.py b/toolchains/sysimage/build_lvm_image.py
index 3558982addc..f2b14c7e716 100755
--- a/toolchains/sysimage/build_lvm_image.py
+++ b/toolchains/sysimage/build_lvm_image.py
@@ -19,9 +19,10 @@
 from crc import INITIAL_CRC, calc_crc
 
 LVM_HEADER_SIZE_BYTES = int(2048 * 512)
-BYTES_PER_MEBIBYTE = int(2 ** 20)
+BYTES_PER_MEBIBYTE = int(2**20)
 EXTENT_SIZE_BYTES = int(4 * BYTES_PER_MEBIBYTE)
 
+
 def main():
     parser = argparse.ArgumentParser()
     parser.add_argument("-o", "--out", help="Target (tzst) file to write lvm image to", type=str)
diff --git a/toolchains/sysimage/build_vfat_image.py b/toolchains/sysimage/build_vfat_image.py
index 67dedc845cf..2e9194c8b32 100755
--- a/toolchains/sysimage/build_vfat_image.py
+++ b/toolchains/sysimage/build_vfat_image.py
@@ -41,7 +41,17 @@ def untar_to_vfat(tf, fs_basedir, out_file, path_transform):
             with open(os.path.join(fs_basedir, path), "wb") as f:
                 f.write(tf.extractfile(member).read())
             subprocess.run(
-                ["faketime", "-f", "1970-1-1 0:0:0", "mcopy", "-o", "-i", out_file, os.path.join(fs_basedir, path), "::/" + path],
+                [
+                    "faketime",
+                    "-f",
+                    "1970-1-1 0:0:0",
+                    "mcopy",
+                    "-o",
+                    "-i",
+                    out_file,
+                    os.path.join(fs_basedir, path),
+                    "::/" + path,
+                ],
                 check=True,
             )
         else:
diff --git a/toolchains/sysimage/container_utils.py b/toolchains/sysimage/container_utils.py
index 863fd574ffc..3e27fd3a053 100644
--- a/toolchains/sysimage/container_utils.py
+++ b/toolchains/sysimage/container_utils.py
@@ -31,7 +31,6 @@ def take_ownership_of_file(file: Path):
     invoke.run(f"sudo chgrp {current_user} {file}")
 
 
-
 def make_tmpfs(base_dir: str = DEFAULT_TMP_PREFIX) -> str:
     """
     Mount a tmpfs volume in a subdirectory of the given `base_dir`.
@@ -52,15 +51,17 @@ def make_tmpfs(base_dir: str = DEFAULT_TMP_PREFIX) -> str:
     return temp_sys_dir
 
 
-def process_temp_sys_dir_args(temp_container_sys_dir: Optional[str],
-                              tmpfs_container_sys_dir: Optional[str]) -> Optional[str]:
+def process_temp_sys_dir_args(
+    temp_container_sys_dir: Optional[str], tmpfs_container_sys_dir: Optional[str]
+) -> Optional[str]:
     """
     Handles two program arguments - user chooses either temp dir or tmpfs dir for podman activities.
     Returns a string pointing at the temporary base dir or None.
""" - assert not (temp_container_sys_dir and tmpfs_container_sys_dir), \ - "temp_container_sys_dir and tmpfs_container_sys_dir flags are mutually exclusive" + assert not ( + temp_container_sys_dir and tmpfs_container_sys_dir + ), "temp_container_sys_dir and tmpfs_container_sys_dir flags are mutually exclusive" if temp_container_sys_dir: return temp_container_sys_dir @@ -88,8 +89,7 @@ def create_container_system_dirs(base_dir: Path) -> ContainerSystemDirs: # Remove this whole function when podman heisenbug no longer applies - see NODE-973 atexit.register(lambda: invoke.run(f"sudo rm -rf {container_sys_dir} {container_run_dir}")) - return ContainerSystemDirs(Path(container_sys_dir), - Path(container_run_dir)) + return ContainerSystemDirs(Path(container_sys_dir), Path(container_run_dir)) def remove_image(container_cmd: str, image_tag: str): @@ -102,4 +102,3 @@ def generate_container_command(default_command: str, temp_sys_dir: Optional[Path return f"{default_command} --root {dirs.sys_dir} --runroot {dirs.run_dir} " return default_command -