diff --git a/.github/workflows-source/schedule-daily.yml b/.github/workflows-source/schedule-daily.yml index afde1ad4dd4..558ceaa3bde 100644 --- a/.github/workflows-source/schedule-daily.yml +++ b/.github/workflows-source/schedule-daily.yml @@ -121,14 +121,29 @@ jobs: run: | echo "$ZH2_DLL01_CSV_SECRETS" > file1 echo "$ZH2_FILE_SHARE_KEY" > file2 && chmod 400 file2 + + # Run bare metal installation test # shellcheck disable=SC2046,SC2086 bazel ${BAZEL_STARTUP_ARGS} run ${BAZEL_CI_CONFIG} \ //ic-os/setupos/envs/dev:launch_bare_metal -- \ --config_path "$(realpath ./ic-os/dev-tools/bare_metal_deployment/zh2-dll01.yaml)" \ --csv_filename "$(realpath file1)" \ --file_share_ssh_key "$(realpath file2)" \ + --inject_image_pub_key "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK3gjE/2K5nxIBbk3ohgs8J5LW+XiObwA+kGtSaF5+4c" \ --file_share_username ci_interim \ --ci_mode + + # Run bare metal node performance benchmarks + # shellcheck disable=SC2046,SC2086 + bazel ${BAZEL_STARTUP_ARGS} run ${BAZEL_CI_CONFIG} \ + //ic-os/setupos/envs/dev:launch_bare_metal -- \ + --config_path "$(realpath ./ic-os/dev-tools/bare_metal_deployment/zh2-dll01.yaml)" \ + --csv_filename "$(realpath file1)" \ + --file_share_ssh_key "$(realpath file2)" \ + --inject_image_pub_key "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK3gjE/2K5nxIBbk3ohgs8J5LW+XiObwA+kGtSaF5+4c" \ + --file_share_username ci_interim \ + --ci_mode \ + --benchmark bazel clean env: BAZEL_STARTUP_ARGS: "--output_base=/var/tmp/bazel-output/" diff --git a/.github/workflows/schedule-daily.yml b/.github/workflows/schedule-daily.yml index 968a27e9d74..a4d58ca00ba 100644 --- a/.github/workflows/schedule-daily.yml +++ b/.github/workflows/schedule-daily.yml @@ -97,14 +97,29 @@ jobs: run: | echo "$ZH2_DLL01_CSV_SECRETS" > file1 echo "$ZH2_FILE_SHARE_KEY" > file2 && chmod 400 file2 + + # Run bare metal installation test # shellcheck disable=SC2046,SC2086 bazel ${BAZEL_STARTUP_ARGS} run ${BAZEL_CI_CONFIG} \ //ic-os/setupos/envs/dev:launch_bare_metal -- \ --config_path "$(realpath ./ic-os/dev-tools/bare_metal_deployment/zh2-dll01.yaml)" \ --csv_filename "$(realpath file1)" \ --file_share_ssh_key "$(realpath file2)" \ + --inject_image_pub_key "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK3gjE/2K5nxIBbk3ohgs8J5LW+XiObwA+kGtSaF5+4c" \ --file_share_username ci_interim \ --ci_mode + + # Run bare metal node performance benchmarks + # shellcheck disable=SC2046,SC2086 + bazel ${BAZEL_STARTUP_ARGS} run ${BAZEL_CI_CONFIG} \ + //ic-os/setupos/envs/dev:launch_bare_metal -- \ + --config_path "$(realpath ./ic-os/dev-tools/bare_metal_deployment/zh2-dll01.yaml)" \ + --csv_filename "$(realpath file1)" \ + --file_share_ssh_key "$(realpath file2)" \ + --inject_image_pub_key "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK3gjE/2K5nxIBbk3ohgs8J5LW+XiObwA+kGtSaF5+4c" \ + --file_share_username ci_interim \ + --ci_mode \ + --benchmark bazel clean env: BAZEL_STARTUP_ARGS: "--output_base=/var/tmp/bazel-output/" diff --git a/Cargo.lock b/Cargo.lock index 1838e477610..c7034a2e7d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2387,6 +2387,16 @@ name = "config" version = "1.0.0" dependencies = [ "anyhow", + "clap 4.5.17", + "ic-types", + "once_cell", + "regex", + "serde", + "serde_json", + "serde_with 1.14.0", + "tempfile", + "url", + "utils", ] [[package]] @@ -5282,7 +5292,6 @@ dependencies = [ "ic-nervous-system-root", "ic-nns-common", "ic-nns-constants", - "ic-nns-governance", "ic-nns-governance-api", "ic-nns-handler-root", "ic-nns-init", @@ -5871,10 +5880,15 @@ dependencies = [ "candid", "candid_parser", "futures", + 
"ic-base-types", + "ic-btc-interface", "ic-cdk 0.13.5", "ic-test-utilities-load-wasm", + "ic-types", + "ic-universal-canister", "pocket-ic", "serde", + "tokio", ] [[package]] @@ -9360,6 +9374,7 @@ dependencies = [ "reqwest 0.12.7", "serde", "serde_json", + "sha2 0.10.8", "strum", "strum_macros", "tempfile", @@ -9651,6 +9666,10 @@ dependencies = [ "prometheus-parse", ] +[[package]] +name = "ic-nervous-system-common-validation" +version = "0.9.0" + [[package]] name = "ic-nervous-system-governance" version = "0.0.1" @@ -9985,6 +10004,7 @@ dependencies = [ "ic-base-types", "ic-crypto-sha2", "ic-nervous-system-clients", + "ic-nervous-system-common-validation", "ic-nervous-system-proto", "ic-nns-common", "ic-protobuf", @@ -10951,7 +10971,6 @@ dependencies = [ "ic-consensus", "ic-consensus-utils", "ic-crypto-for-verification-only", - "ic-crypto-test-utils-ni-dkg", "ic-cycles-account-manager", "ic-execution-environment", "ic-interfaces", @@ -11443,7 +11462,7 @@ dependencies = [ "ic-nervous-system-proto", "ic-nns-common", "ic-nns-constants", - "ic-nns-governance", + "ic-nns-governance-api", "ic-sns-governance", "ic-sns-init", "ic-sns-root", @@ -11494,6 +11513,7 @@ dependencies = [ "ic-nervous-system-common-build-metadata", "ic-nervous-system-common-test-keys", "ic-nervous-system-common-test-utils", + "ic-nervous-system-common-validation", "ic-nervous-system-governance", "ic-nervous-system-lock", "ic-nervous-system-proto", @@ -11602,6 +11622,7 @@ dependencies = [ "ic-nervous-system-common-test-keys", "ic-nervous-system-proto", "ic-nns-constants", + "ic-nns-governance-api", "ic-sns-governance", "ic-sns-init-protobuf-generator", "ic-sns-root", @@ -18774,6 +18795,7 @@ version = "0.1.0" dependencies = [ "anyhow", "clap 4.5.17", + "config", "partition_tools", "serde", "serde_json", @@ -18781,7 +18803,6 @@ dependencies = [ "tempfile", "tokio", "url", - "utils", ] [[package]] @@ -20891,11 +20912,6 @@ name = "utils" version = "1.0.0" dependencies = [ "anyhow", - "once_cell", - "serde", - "serde_json", - "serde_with 1.14.0", - "url", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 192bc60e6b5..a9fce2a6839 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -168,6 +168,7 @@ members = [ "rs/nervous_system/common/test_canister", "rs/nervous_system/common/test_keys", "rs/nervous_system/common/test_utils", + "rs/nervous_system/common/validation", "rs/nervous_system/humanize", "rs/nervous_system/integration_tests", "rs/nervous_system/lock", diff --git a/ic-os/components/BUILD.bazel b/ic-os/components/BUILD.bazel index da922a803bf..b94f2331519 100644 --- a/ic-os/components/BUILD.bazel +++ b/ic-os/components/BUILD.bazel @@ -40,6 +40,7 @@ REPO_COMPONENTS = glob( # files used for testing and development that aren't "used" by any ic-os variant ignored_repo_components = [ + "hostos-scripts/generate-guestos-config/dev-generate-guestos-config.sh", "networking/dev-certs/canister_http_test_ca.key", "networking/dev-certs/root_cert_gen.sh", ] diff --git a/ic-os/components/hostos-scripts/generate-guestos-config/dev-generate-guestos-config.sh b/ic-os/components/hostos-scripts/generate-guestos-config/dev-generate-guestos-config.sh new file mode 100755 index 00000000000..49276f8719b --- /dev/null +++ b/ic-os/components/hostos-scripts/generate-guestos-config/dev-generate-guestos-config.sh @@ -0,0 +1,162 @@ +#!/bin/bash + +set -e + +# Generate the GuestOS configuration. 
+ +# Source the functions required for writing metrics +source /opt/ic/bin/metrics.sh + +SCRIPT="$(basename $0)[$$]" + +# Get keyword arguments +for argument in "${@}"; do + case ${argument} in + -c=* | --config=*) + CONFIG="${argument#*=}" + shift + ;; + -d=* | --deployment=*) + DEPLOYMENT="${argument#*=}" + shift + ;; + -h | --help) + echo 'Usage: +Generate GuestOS Configuration + +Arguments: + -c=, --config= specify the config.ini configuration file (Default: /boot/config/config.ini) + -d=, --deployment= specify the deployment.json configuration file (Default: /boot/config/deployment.json) + -h, --help show this help message and exit + -i=, --input= specify the input template file (Default: /opt/ic/share/guestos.xml.template) + -m=, --media= specify the config media image file (Default: /run/ic-node/config.img) + -o=, --output= specify the output configuration file (Default: /var/lib/libvirt/guestos.xml) +' + exit 1 + ;; + -i=* | --input=*) + INPUT="${argument#*=}" + shift + ;; + -m=* | --media=*) + MEDIA="${argument#*=}" + shift + ;; + -o=* | --output=*) + OUTPUT="${argument#*=}" + shift + ;; + *) + echo "Error: Argument is not supported." + exit 1 + ;; + esac +done + +function validate_arguments() { + if [ "${CONFIG}" == "" -o "${DEPLOYMENT}" == "" -o "${INPUT}" == "" -o "${OUTPUT}" == "" ]; then + $0 --help + fi +} + +# Set arguments if undefined +CONFIG="${CONFIG:=/boot/config/config.ini}" +DEPLOYMENT="${DEPLOYMENT:=/boot/config/deployment.json}" +INPUT="${INPUT:=/opt/ic/share/guestos.xml.template}" +MEDIA="${MEDIA:=/run/ic-node/config.img}" +OUTPUT="${OUTPUT:=/var/lib/libvirt/guestos.xml}" + +write_log() { + local message=$1 + + if [ -t 1 ]; then + echo "${SCRIPT} ${message}" >/dev/stdout + fi + + logger -t ${SCRIPT} "${message}" +} + +function read_variables() { + # Read limited set of keys. Be extra-careful quoting values as it could + # otherwise lead to executing arbitrary shell code! 
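+    # For reference, config.ini is a series of key=value lines; the values
+    # shown here are illustrative only:
+    #
+    #   ipv6_prefix=2a00:fb01:400:200
+    #   ipv6_gateway=2a00:fb01:400:200::1
+    #   domain=example.com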
+ while IFS="=" read -r key value; do + case "$key" in + "ipv6_prefix") ipv6_prefix="${value}" ;; + "ipv6_gateway") ipv6_gateway="${value}" ;; + "ipv4_address") ipv4_address="${value}" ;; + "ipv4_prefix_length") ipv4_prefix_length="${value}" ;; + "ipv4_gateway") ipv4_gateway="${value}" ;; + "domain") domain="${value}" ;; + esac + done <"${CONFIG}" +} + +function assemble_config_media() { + cmd=(/opt/ic/bin/build-bootstrap-config-image.sh ${MEDIA}) + cmd+=(--nns_public_key "/boot/config/nns_public_key.pem") + cmd+=(--elasticsearch_hosts "$(/opt/ic/bin/fetch-property.sh --key=.logging.hosts --metric=hostos_logging_hosts --config=${DEPLOYMENT})") + cmd+=(--ipv6_address "$(/opt/ic/bin/hostos_tool generate-ipv6-address --node-type GuestOS)") + cmd+=(--ipv6_gateway "${ipv6_gateway}") + if [[ -n "$ipv4_address" && -n "$ipv4_prefix_length" && -n "$ipv4_gateway" && -n "$domain" ]]; then + cmd+=(--ipv4_address "${ipv4_address}/${ipv4_prefix_length}") + cmd+=(--ipv4_gateway "${ipv4_gateway}") + cmd+=(--domain "${domain}") + fi + cmd+=(--hostname "guest-$(/opt/ic/bin/fetch-mgmt-mac.sh | sed 's/://g')") + cmd+=(--nns_url "$(/opt/ic/bin/fetch-property.sh --key=.nns.url --metric=hostos_nns_url --config=${DEPLOYMENT})") + if [ -f "/boot/config/node_operator_private_key.pem" ]; then + cmd+=(--node_operator_private_key "/boot/config/node_operator_private_key.pem") + fi + + cmd+=(--accounts_ssh_authorized_keys "/boot/config/ssh_authorized_keys") + + # Run the above command + "${cmd[@]}" + write_log "Assembling config media for GuestOS: ${MEDIA}" +} + +function generate_guestos_config() { + RESOURCES_MEMORY=$(/opt/ic/bin/fetch-property.sh --key=.resources.memory --metric=hostos_resources_memory --config=${DEPLOYMENT}) + MAC_ADDRESS=$(/opt/ic/bin/hostos_tool generate-mac-address --node-type GuestOS) + # NOTE: `fetch-property` will error if the target is not found. Here we + # only want to act when the field is set. + CPU_MODE=$(jq -r ".resources.cpu" ${DEPLOYMENT}) + + CPU_DOMAIN="kvm" + CPU_SPEC="/opt/ic/share/kvm-cpu.xml" + if [ "${CPU_MODE}" == "qemu" ]; then + CPU_DOMAIN="qemu" + CPU_SPEC="/opt/ic/share/qemu-cpu.xml" + fi + + if [ ! -f "${OUTPUT}" ]; then + mkdir -p "$(dirname "$OUTPUT")" + sed -e "s@{{ resources_memory }}@${RESOURCES_MEMORY}@" \ + -e "s@{{ mac_address }}@${MAC_ADDRESS}@" \ + -e "s@{{ cpu_domain }}@${CPU_DOMAIN}@" \ + -e "/{{ cpu_spec }}/{r ${CPU_SPEC}" -e "d" -e "}" \ + "${INPUT}" >"${OUTPUT}" + restorecon -R "$(dirname "$OUTPUT")" + write_log "Generating GuestOS configuration file: ${OUTPUT}" + write_metric "hostos_generate_guestos_config" \ + "1" \ + "HostOS generate GuestOS config" \ + "gauge" + else + write_log "GuestOS configuration file already exists: ${OUTPUT}" + write_metric "hostos_generate_guestos_config" \ + "0" \ + "HostOS generate GuestOS config" \ + "gauge" + fi +} + +function main() { + # Establish run order + validate_arguments + read_variables + assemble_config_media + generate_guestos_config +} + +main diff --git a/ic-os/components/init/bootstrap-ic-node/guestos/bootstrap-ic-node.sh b/ic-os/components/init/bootstrap-ic-node/guestos/bootstrap-ic-node.sh index c84b7d5ed19..6b1f5f8b134 100755 --- a/ic-os/components/init/bootstrap-ic-node/guestos/bootstrap-ic-node.sh +++ b/ic-os/components/init/bootstrap-ic-node/guestos/bootstrap-ic-node.sh @@ -135,6 +135,10 @@ write_metric_attr "guestos_boot_action" \ "GuestOS boot action" \ "gauge" +# /boot/config/CONFIGURED serves as a tag to indicate that the one-time bootstrap configuration has been completed. 
+# If the `/boot/config/CONFIGURED` file is not present, the boot sequence will
+# search for a virtual USB stick (the bootstrap config image) containing the
+# injected configuration files, process it, and then create the file.
 if [ -f /boot/config/CONFIGURED ]; then
     echo "Bootstrap completed already"
 fi
diff --git a/ic-os/dev-tools/bare_metal_deployment/BUILD.bazel b/ic-os/dev-tools/bare_metal_deployment/BUILD.bazel
index c4d9e93fc91..23d6f25dd63 100644
--- a/ic-os/dev-tools/bare_metal_deployment/BUILD.bazel
+++ b/ic-os/dev-tools/bare_metal_deployment/BUILD.bazel
@@ -1,11 +1,7 @@
 package(default_visibility = ["//rs:ic-os-pkg"])
 
-exports_files(["deploy.py"])
-
-# Necessary to find the directory where the scripts are stored.
-genrule(
-    name = "find_idrac_package_path",
-    outs = ["idrac_package_path.txt"],
-    cmd = "echo $(location @python_deps_idracredfishsupport//:IdracRedfishSupport-0.0.8.data/scripts/VirtualDiskExpansionREDFISH.py) | xargs dirname > $@",
-    tools = ["@python_deps_idracredfishsupport//:IdracRedfishSupport-0.0.8.data/scripts/VirtualDiskExpansionREDFISH.py"],
-)
+exports_files([
+    "deploy.py",
+    "benchmark_runner.sh",
+    "benchmark_driver.sh",
+])
diff --git a/ic-os/dev-tools/bare_metal_deployment/benchmark_driver.sh b/ic-os/dev-tools/bare_metal_deployment/benchmark_driver.sh
new file mode 100755
index 00000000000..46772097890
--- /dev/null
+++ b/ic-os/dev-tools/bare_metal_deployment/benchmark_driver.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+set -eEuo pipefail
+
+benchmark_runner="${1}"
+ssh_key="${2}"
+ip_address="${3}"
+tools=("${@:4}")
+
+key_component=()
+if [ "${ssh_key}" != "None" ]; then
+    key_component=("-i" "${ssh_key}")
+fi
+
+TMPDIR=$(mktemp -d)
+trap "rm -rf ${TMPDIR}" EXIT
+
+# Send over the runner
+scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${key_component[@]} "${benchmark_runner}" "admin@[${ip_address}]:benchmark_runner.sh"
+
+# Send over the tools
+if [ "${#tools[@]}" -ne 0 ]; then
+    scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${key_component[@]} "${tools[@]}" "admin@[${ip_address}]:"
+fi
+
+# Run benchmarking
+echo >&2 "Benchmarking node..."
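+# (ssh takes the bare IPv6 literal here, unlike the bracketed form scp requires.)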
+ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${key_component[@]} "admin@${ip_address}" "sudo ./benchmark_runner.sh"
+
+# Collect results
+scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${key_component[@]} -r "admin@[${ip_address}]:results" "${TMPDIR}"
+
+# Output results
+echo "-------------------- Benchmark Results --------------------"
+cat ${TMPDIR}/results/*
+echo "-----------------------------------------------------------"
diff --git a/ic-os/dev-tools/bare_metal_deployment/benchmark_runner.sh b/ic-os/dev-tools/bare_metal_deployment/benchmark_runner.sh
new file mode 100755
index 00000000000..46c4b4fcbe6
--- /dev/null
+++ b/ic-os/dev-tools/bare_metal_deployment/benchmark_runner.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+set -ux
+
+rm -rf results
+mkdir -p results
+
+mkdir -p /var/lib/ic/data/benchmark
+
+{ time dd if=/dev/zero of=/var/lib/ic/data/benchmark/out bs=1M count=10000 oflag=direct; } 2>results/big-dd.log
+{ time bash -c 'for i in {1..16}; do dd if=/dev/zero of=/var/lib/ic/data/benchmark/out_${i} bs=1M count=1000 oflag=direct & done ; wait'; } 2>results/small-dds.log
+fio --filename=/var/lib/ic/data/benchmark/testfile --io_size=10GB --size=60GB --direct=1 --buffered=0 --invalidate=1 --norandommap --random_generator=lfsr --rw=write --bs=4m --ioengine=libaio --iodepth=32 --numjobs=16 --group_reporting --name=throughput-sw-job --eta-newline=1 >results/fio.log
+
+./benchmark.sh
+./stress.sh
+
+exit 0
diff --git a/ic-os/dev-tools/bare_metal_deployment/deploy.py b/ic-os/dev-tools/bare_metal_deployment/deploy.py
index 6044d98ec40..4a251fb3535 100755
--- a/ic-os/dev-tools/bare_metal_deployment/deploy.py
+++ b/ic-os/dev-tools/bare_metal_deployment/deploy.py
@@ -92,6 +92,9 @@ class Args:
     # If present - decompress `upload_img` and inject this into config.ini
     inject_image_verbose: Optional[str] = None
 
+    # If present - decompress `upload_img` and inject this into ssh_authorized_keys/admin
+    inject_image_pub_key: Optional[str] = None
+
     # Path to the setupos-inject-configuration tool. Necessary if any inject* args are present
     inject_configuration_tool: Optional[str] = None
 
@@ -101,12 +104,24 @@ class Args:
     # How many nodes should be deployed in parallel
     parallel: int = 1
 
-    # Directory where idrac scripts are held. If None, pip bin directory will be used.
-    idrac_script_dir_file: Optional[str] = None
+    # Path to an idrac script, which we use to find the directory. If None, pip bin directory will be used.
+    idrac_script: Optional[str] = None
 
     # Disable progress bars if True
     ci_mode: bool = flag(default=False)
 
+    # Run benchmarks if True
+    benchmark: bool = flag(default=False)
+
+    # Path to the benchmark_driver script.
+    benchmark_driver_script: Optional[str] = "./benchmark_driver.sh"
+
+    # Path to the benchmark_runner script.
+    benchmark_runner_script: Optional[str] = "./benchmark_runner.sh"
+
+    # Paths to any benchmark tool scripts.
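+    # (Relative paths: the defaults below assume deploy.py is invoked from its
+    # own directory.)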
+ benchmark_tools: Optional[List[str]] = field(default_factory=lambda: ["../hw_validation/stress.sh", "../hw_validation/benchmark.sh"]) + def __post_init__(self): assert self.upload_img is None or self.upload_img.endswith( ".tar.zst" @@ -440,6 +455,47 @@ def boot_images(bmc_infos: List[BMCInfo], return True +def benchmark_node(bmc_info: BMCInfo, benchmark_driver_script: str, benchmark_runner_script: str, benchmark_tools: List[str], file_share_ssh_key: Optional[str] = None): + log.info("Benchmarking machine.") + + ip_address = bmc_info.guestos_ipv6_address + + benchmark_tools = " ".join(benchmark_tools) if benchmark_tools is not None else "" + + # Throw away the result, for now + invoke.run(f"{benchmark_driver_script} {benchmark_runner_script} {file_share_ssh_key} {ip_address} {benchmark_tools}", warn=True) + return OperationResult(bmc_info, success=True) + + +def benchmark_nodes(bmc_infos: List[BMCInfo], + parallelism: int, + benchmark_driver_script: str, + benchmark_runner_script: str, + benchmark_tools: List[str], + file_share_ssh_key: Optional[str] = None): + results: List[OperationResult] = [] + + arg_tuples = ((bmc_info, benchmark_driver_script, benchmark_runner_script, benchmark_tools, file_share_ssh_key) \ + for bmc_info in bmc_infos) + + with Pool(parallelism) as p: + results = p.starmap(benchmark_node, arg_tuples) + + log.info("Benchmark summary:") + benchmark_failure = False + for res in results: + log.info(res) + if not res.success: + benchmark_failure = True + + if benchmark_failure: + log.error("One or more node benchmarks failed") + return False + else: + log.info("All benchmarks completed successfully.") + return True + + def create_file_share_endpoint(file_share_url: str, file_share_username: Optional[str]) -> str: return file_share_url \ @@ -490,7 +546,8 @@ def inject_config_into_image(setupos_inject_configuration_path: Path, ipv6_prefix: str, ipv6_gateway: str, ipv4_args: Optional[Ipv4Args], - verbose: Optional[str]) -> Path: + verbose: Optional[str], + pub_key: Optional[str]) -> Path: """ Transform the compressed image. 
* Decompress image into working_dir @@ -525,7 +582,11 @@ def is_executable(p: Path) -> bool: if verbose: verbose_part = f"--verbose {verbose} " - invoke.run(f"{setupos_inject_configuration_path} {image_part} {prefix_part} {gateway_part} {ipv4_part} {verbose_part}", echo=True) + admin_key_part = "" + if pub_key: + admin_key_part = f"--public-keys \"{pub_key}\"" + + invoke.run(f"{setupos_inject_configuration_path} {image_part} {prefix_part} {gateway_part} {ipv4_part} {verbose_part} {admin_key_part}", echo=True) # Reuse the name of the compressed image path in the working directory result_filename = compressed_image_path.name @@ -535,15 +596,6 @@ def is_executable(p: Path) -> bool: return result_path -def get_idrac_script_dir(idrac_script_dir_file: Optional[str]) -> Path: - if idrac_script_dir_file: - log.info(f"Using idrac script dir file: {idrac_script_dir_file}") - idrac_script_dir = open(idrac_script_dir_file).read().strip() - assert '\n' not in idrac_script_dir, "idrac script dir file must be one line" - return Path(idrac_script_dir) - return Path(DEFAULT_IDRAC_SCRIPT_DIR) - - def main(): print(sys.argv) args: Args = parse(Args, add_config_path_arg=True) # Parse from config file too @@ -551,11 +603,11 @@ def main(): DISABLE_PROGRESS_BAR = args.ci_mode # noqa - ruff format wants to erroneously delete this network_image_url: str = ( - f"{args.file_share_url}:{args.file_share_dir}/{args.file_share_image_filename}" + f"http://{args.file_share_url}/{args.file_share_image_filename}" ) log.info(f"Using network_image_url: {network_image_url}") - idrac_script_dir = get_idrac_script_dir(args.idrac_script_dir_file) + idrac_script_dir = Path(args.idrac_script).parent if args.idrac_script else Path(DEFAULT_IDRAC_SCRIPT_DIR) log.info(f"Using idrac script dir: {idrac_script_dir}") csv_filename: str = args.csv_filename @@ -568,6 +620,22 @@ def main(): args.inject_image_ipv4_prefix_length, args.inject_image_domain) + # Benchmark these nodes, rather than deploy them. 
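+    # (Benchmark mode assumes the nodes were already deployed and are reachable
+    # over SSH at their GuestOS IPv6 addresses; nothing is uploaded or booted.)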
+ if args.benchmark: + success = benchmark_nodes( + bmc_infos=bmc_infos, + parallelism=args.parallel, + benchmark_driver_script=args.benchmark_driver_script, + benchmark_runner_script=args.benchmark_runner_script, + benchmark_tools=args.benchmark_tools, + file_share_ssh_key=args.file_share_ssh_key, + ) + + if not success: + sys.exit(1) + + sys.exit(0) + if args.upload_img or args.inject_image_ipv6_prefix: file_share_endpoint = create_file_share_endpoint(args.file_share_url, args.file_share_username) assert_ssh_connectivity(file_share_endpoint, args.file_share_ssh_key) @@ -583,7 +651,8 @@ def main(): args.inject_image_ipv6_prefix, args.inject_image_ipv6_gateway, ipv4_args, - args.inject_image_verbose + args.inject_image_verbose, + args.inject_image_pub_key ) upload_to_file_share( diff --git a/ic-os/dev-tools/bare_metal_deployment/tools.bzl b/ic-os/dev-tools/bare_metal_deployment/tools.bzl index 8b5b64fc342..bef40135eec 100644 --- a/ic-os/dev-tools/bare_metal_deployment/tools.bzl +++ b/ic-os/dev-tools/bare_metal_deployment/tools.bzl @@ -34,9 +34,25 @@ def launch_bare_metal(name, image_zst_file): "$(location //rs/ic_os/dev_test_tools/setupos-inject-configuration)", "--upload_img", "$(location " + image_zst_file + ")", - "--idrac_script_dir", - "$(location //ic-os/dev-tools/bare_metal_deployment:find_idrac_package_path)", + "--idrac_script", + "$(location @python_deps_idracredfishsupport//:IdracRedfishSupport-0.0.8.data/scripts/VirtualDiskExpansionREDFISH.py)", + "--benchmark_driver_script", + "$(location //ic-os/dev-tools/bare_metal_deployment:benchmark_driver.sh)", + "--benchmark_runner_script", + "$(location //ic-os/dev-tools/bare_metal_deployment:benchmark_runner.sh)", + "--benchmark_tools", + "$(location //ic-os/dev-tools/hw_validation:stress.sh)", + "$(location //ic-os/dev-tools/hw_validation:benchmark.sh)", + ], + data = [ + ":" + binary_name, + image_zst_file, + "//rs/ic_os/dev_test_tools/setupos-inject-configuration", + "@python_deps_idracredfishsupport//:IdracRedfishSupport-0.0.8.data/scripts/VirtualDiskExpansionREDFISH.py", + "//ic-os/dev-tools/bare_metal_deployment:benchmark_runner.sh", + "//ic-os/dev-tools/bare_metal_deployment:benchmark_driver.sh", + "//ic-os/dev-tools/hw_validation:stress.sh", + "//ic-os/dev-tools/hw_validation:benchmark.sh", ], - data = [":" + binary_name, image_zst_file, "//rs/ic_os/dev_test_tools/setupos-inject-configuration", "//ic-os/dev-tools/bare_metal_deployment:find_idrac_package_path"], tags = ["manual"], ) diff --git a/ic-os/dev-tools/hw_validation/BUILD.bazel b/ic-os/dev-tools/hw_validation/BUILD.bazel new file mode 100644 index 00000000000..95e2b39ed5a --- /dev/null +++ b/ic-os/dev-tools/hw_validation/BUILD.bazel @@ -0,0 +1,6 @@ +package(default_visibility = ["//rs:ic-os-pkg"]) + +exports_files([ + "benchmark.sh", + "stress.sh", +]) diff --git a/ic-os/dev-tools/hw_validation/benchmark.sh b/ic-os/dev-tools/hw_validation/benchmark.sh index db6efdb6d9f..7565de54d7d 100755 --- a/ic-os/dev-tools/hw_validation/benchmark.sh +++ b/ic-os/dev-tools/hw_validation/benchmark.sh @@ -15,17 +15,26 @@ if ! command -v sysbench &>/dev/null; then exit fi -TMP_DIR_FS=$(stat -f -c %T /tmp) -if [ "$TMP_DIR_FS" == "tmpfs" ]; then +TARGET_DIR="${TARGET_DIR:-/var/lib/ic/data/benchmark}" +mkdir -p "${TARGET_DIR}" + +TARGET_DIR_FS=$(stat -f -c %T "${TARGET_DIR}") +if [ "$TARGET_DIR_FS" == "tmpfs" ]; then echo "/tmp is mounted as a tmpfs. Need a suitable location for benchmarking file io from disk. Exiting." 
exit fi NUM_PROCS=$(nproc) -OUTPUT_FILE="$(pwd)/bench_results_$(hostname)_$(date +%Y-%m-%dT%H-%M-%S).txt" +OUTPUT_DIR="$(pwd)/results" +mkdir -p "${OUTPUT_DIR}" +TEST_NAME="bench_results_$(hostname)_$(date +%Y-%m-%dT%H-%M-%S)" +OUTPUT_FILE="${OUTPUT_DIR}/${TEST_NAME}.txt" # Wrapped in code block to reroute all output to log { + echo "${TEST_NAME}" + + pushd "${TARGET_DIR}" # CPU tests - 1 vs all print_exec sysbench cpu run \ @@ -74,13 +83,6 @@ OUTPUT_FILE="$(pwd)/bench_results_$(hostname)_$(date +%Y-%m-%dT%H-%M-%S).txt" # File IO test ## Benchmarks: sync, mmap, fsync all, fsync every 10th - ## Use tmp to not junk up CWD - TEMP_DIR="/tmp/sysbench_$(date +%s)" - mkdir -p "$TEMP_DIR" - pushd "$TEMP_DIR" || { - echo "Unable to create test directory $TEMP_DIR" - exit 1 - } print_exec sysbench fileio prepare --file-test-mode=rndrw ## Single threaded @@ -96,7 +98,7 @@ OUTPUT_FILE="$(pwd)/bench_results_$(hostname)_$(date +%Y-%m-%dT%H-%M-%S).txt" print_exec sysbench fileio run --file-test-mode=rndrw --file-io-mode=sync --threads="$NUM_PROCS" --file-fsync-freq=10 print_exec sysbench fileio cleanup - popd || { exit 1; } - rmdir "$TEMP_DIR" + + popd } >>"$OUTPUT_FILE" diff --git a/ic-os/dev-tools/hw_validation/stress.sh b/ic-os/dev-tools/hw_validation/stress.sh index fcf43014862..6c5b9f6b2e5 100755 --- a/ic-os/dev-tools/hw_validation/stress.sh +++ b/ic-os/dev-tools/hw_validation/stress.sh @@ -8,12 +8,15 @@ fi set -u set -x +OUTPUT_DIR="$(pwd)/results" +mkdir -p "${OUTPUT_DIR}" + # Exercise all stressors sequentially. Use all processors. # Time out after 10 seconds for each stressor. # Print metrics. Verify outputs where relevant. # Note that using the `--all` parameter instead of `--sequential` may crash the machine. stress-ng --sequential "$(nproc)" \ - --log-file "./stress_test_$(hostname)_$(date +%Y-%m-%dT%H-%M-%S).txt" \ + --log-file "${OUTPUT_DIR}/stress_test_$(hostname)_$(date +%Y-%m-%dT%H-%M-%S).txt" \ --timeout 30 \ --metrics \ --verify \ diff --git a/ic-os/docs/Configuration.adoc b/ic-os/docs/Configuration.adoc index 2f5824961f3..d505d0359b3 100644 --- a/ic-os/docs/Configuration.adoc +++ b/ic-os/docs/Configuration.adoc @@ -20,7 +20,7 @@ SetupOS validates, sanitizes, and copies all of its configuration files to the H deployment.json # Deployment-specific configurations nns_public_key.pem # NNS public key -Refer to link:../components/setupos-scripts/config.sh[config.sh] link:../components/setupos-scripts/setup-hostos-config.sh[setup-hostos-config.sh] +Refer to link:../../rs/ic_os/config/README.md[rs/ic_os/config] & link:../components/setupos-scripts/setup-hostos-config.sh[setup-hostos-config.sh] === HostOS -> GuestOS diff --git a/ic-os/guestos/context/packages.dev b/ic-os/guestos/context/packages.dev index c25abc98203..8b524507625 100644 --- a/ic-os/guestos/context/packages.dev +++ b/ic-os/guestos/context/packages.dev @@ -26,6 +26,8 @@ linux-tools-virtual-hwe-24.04 # performance testing additions fio +stress-ng +sysbench bpftrace linux-tools-common linux-tools-generic @@ -34,4 +36,4 @@ perf-tools-unstable # xz needed by bpftrace and others xz-utils # static busybox for various missing utilities and to have a static binary with less noise -busybox-static \ No newline at end of file +busybox-static diff --git a/ic-os/guestos/docs/ConfigStore.adoc b/ic-os/guestos/docs/ConfigStore.adoc index 5044df14987..f0d0c8e382d 100644 --- a/ic-os/guestos/docs/ConfigStore.adoc +++ b/ic-os/guestos/docs/ConfigStore.adoc @@ -1,6 +1,6 @@ = GuestOS Config Store -This document describes the contents of the GuestOS 
*config* partition (*/dev/vda3* in the GuestOS disk image). The config partition stores information that must be preserved across system upgrades and needs to be available during early boot time. Consequently, this information cannot reside within the encrypted payload data partition. +This document calls out some of the contents of the GuestOS *config* partition (*/dev/vda3* in the GuestOS disk image). The config partition stores information that must be preserved across system upgrades and needs to be available during early boot time. Consequently, this information cannot reside within the encrypted payload data partition. Currently, all contents in the config partition are stored as plain-text without integrity protection. @@ -22,108 +22,10 @@ This file contains the key material used to derive the wrapping key for all bloc In the absence of a sealing key (which will be available in SEV-protected trusted execution environments), the `store.keyfile` is stored as plain-text. Once a sealing key becomes available, it should be used to wrap the contents of this file. -=== ssh/ - -This directory contains SSH host keys. These keys must be persisted across upgrades and are transferred to `/etc/ssh` during the boot process. - -=== node_exporter/ - -This directory contains the Node Exporter TLS keys. These keys must be persisted across upgrades and are transferred to `/etc/node_exporter` during the boot process. - -=== accounts_ssh_authorized_keys/ - -This directory contains individual files named `admin`, `backup`, `readonly` and `root`. The contents of these files serve as `authorized_keys` for their respective role account. This means that, for example, `accounts_ssh_authorized_keys/admin` is transferred to `~admin/.ssh/authorized_keys` on the target system. - -* *admin*: ... -* *backup / readonly*: These files can only be modified via an NNS proposal, and are in place for subnet recovery or issue debugging purposes. -* *root*: Used for development/debug builds only. - -This directory and any file in it is optional. By default, no authorized key is installed for any account, meaning no one has SSH access to the GuestOS. - -=== node_operator_private_key.pem - -This file contains the Node Operator private key, which is registered with the NNS and used to sign the IC join request. The private key can be generated using one of the following commands: - - dfx identity new --disable-encryption node_operator - cp ~/.config/dfx/identity/node_operator/identity.pem ./node_operator_private_key.pem - -Or - - quill generate --pem-file node_operator_private_key.pem - -=== network.conf - -Network configuration parameters. - -Must be a file of key/value pairs separated by "=" (one per line) with the following possible keys: - -- *ipv6_address*: The IPv6 address of the node, used for the node to "identify" itself (via the registry). All public IC services are offered through this address, which will be assigned to the enp1s0 interface. It is used as the "private" management access to the node. If left blank, SLAAC is used on the interface. - -- *ipv6_gateway*: The default IPv6 gateway, only meaningful if ipv6_address is also provided. - -- *hostname*: The hostname, which can be any text in principle but is generally derived from the ID of the physical host (e.g., MAC address). - -Note: if this file is not given, the system will fall back to network auto configuration. - -=== nns.conf - -The IP address(es) of NNS node(s). 
- -Must be a file of key/value pairs separated by "=" (one per line) with the following possible keys: - -- *nns_url*: The URL (HTTP) of the NNS node(s). If multiple URLs are provided, separate them with whitespace. If this key is not specified, http://127.0.0.1:8080 is assumed by default (which only works for nodes that do not require registration). - -This configuration is used when generating the replica configuration to fill in the nns_url placeholder. - -=== nns_public_key.pem - -This file must be a text file containing the public key of the NNS to be used. - == Development configuration files These configuration files should only be used for development and testing purposes. -=== backup.conf - -This file configures the usage of the backup spool directory. - -Must be a file of key/value pairs separated by "=" (one per line) with the following possible keys: - -- *backup_retention_time_secs*: The maximum age of any file or directory kept in the backup spool. - -- *backup_purging_interval_secs*: The interval at which the backup spool directory will be scanned for files to delete. - -This configuration file should only be used for testnet deployments (to achieve shorter retention times) and must be missing for production deployments, where suitable production default values are assumed. - -=== filebeat.conf - -Configures filebeat to export logs out of the system. - -Must be a file of key/value pairs separated by "=" (one per line) with the following possible keys: - -- elasticsearch_hosts: Space-separated lists of hosts to ship logs to. -- elasticsearch_tags: Space-separated list of tags to apply to exported log records. - -If left unspecified, filebeat will be left unconfigured and no logs are exported. - -=== socks_proxy.conf - -Configuration for socks proxy. - -Must be a file of key/value pairs separated by "=" (one per line) with the following possible keys: - -- socks_proxy: URL of the socks proxy to use. E.g socks5://socksproxy.com:1080 - -=== bitcoin_addr.conf - -Configuration for bitcoin adapter. - -Must be a file of key/value pairs separated by "=" (one per line) with the following possible keys: - -- bitcoind_addr: Address of the bitcoind to be contacted by bitcoin adapter service. - -If left unspecified, the bitcoin adapter will not work properly due to lack of external system to contact. - == Injecting external state *Typical bootstrap process:* On first boot, the system will perform technical initialization (filesystems, etc.) and afterwards, initialize itself to act as a node in the IC. The node is initialized using key generation on the node itself (such that the private key never leaves the node) and through joining the IC (the node gets the rest of its state via joining the IC). "Registration" to the target IC is initiated by the node itself by sending a Node Operator-signed "join" request to its NNS. @@ -141,14 +43,3 @@ This behavior is suitable for the following use cases: - Externally controlled join of a node to a subnet: In this case, ic-prep is used to prepare key material to the node, while ic-admin is used to modify the target NNS such that it "accepts" the new node as part of the IC. -=== ic_crypto - -Externally generated cryptographic keys. - -Must be a directory with contents matching the internal representation of the ic_crypto directory. When given, this provides the private keys of the node. If not given, the node will generate its own private/public key pair. - -=== ic_registry_local_store - -Initial registry state. 
-
-Must be a directory with contents matching the internal representation of the ic_registry_local_store. When given, this provides the initial state of the registry. If not given, the node will fetch (initial) registry state from the NNS.
diff --git a/ic-os/hostos/defs.bzl b/ic-os/hostos/defs.bzl
index 8e8c9a2408d..e6c1af9dcc3 100644
--- a/ic-os/hostos/defs.bzl
+++ b/ic-os/hostos/defs.bzl
@@ -77,6 +77,9 @@ def image_deps(mode, _malicious = False):
 
     deps.update(image_variants[mode])
 
+    if "dev" in mode:
+        deps["rootfs"].update({"//ic-os/components:hostos-scripts/generate-guestos-config/dev-generate-guestos-config.sh": "/opt/ic/bin/generate-guestos-config.sh:0755"})
+
     return deps
 
 # Inject a step building an LVM partition. This depends on boot and root built
diff --git a/publish/binaries/BUILD.bazel b/publish/binaries/BUILD.bazel
index e0d4956190f..54891371e49 100644
--- a/publish/binaries/BUILD.bazel
+++ b/publish/binaries/BUILD.bazel
@@ -56,14 +56,11 @@ TESTONLY_BINARIES = [
     "drun",
     "e2e-test-driver",
     "ic-admin",
-    "ic-backup",
     "ic-boundary",
     "ic-boundary-tls",
     "ic-https-outcalls-adapter",
     "ic-nns-init",
     "ic-prep",
-    "ic-recovery",
-    "ic-replay",
     "ic-starter",
     "ic-test-state-machine",
     "pocket-ic",
diff --git a/rs/backup/BUILD.bazel b/rs/backup/BUILD.bazel
index 798ec85fe74..4e3547e9e0f 100644
--- a/rs/backup/BUILD.bazel
+++ b/rs/backup/BUILD.bazel
@@ -36,7 +36,6 @@ ALIASES = {}
 
 rust_library(
     name = "backup",
-    testonly = True,
     srcs = glob(["src/**"]),
     aliases = ALIASES,
     crate_name = "ic_backup",
@@ -50,7 +49,6 @@ rust_library(
 
 rust_binary(
     name = "ic-backup",
-    testonly = True,
     srcs = ["src/main.rs"],
     aliases = ALIASES,
     proc_macro_deps = MACRO_DEPENDENCIES,
diff --git a/rs/bitcoin/ckbtc/minter/src/management.rs b/rs/bitcoin/ckbtc/minter/src/management.rs
index fc65486b01e..418577d7c5b 100644
--- a/rs/bitcoin/ckbtc/minter/src/management.rs
+++ b/rs/bitcoin/ckbtc/minter/src/management.rs
@@ -13,7 +13,6 @@ use ic_cdk::api::call::RejectionCode;
 use ic_ckbtc_kyt::{DepositRequest, Error as KytError, FetchAlertsResponse, WithdrawalAttempt};
 use ic_management_canister_types::{
     DerivationPath, ECDSAPublicKeyArgs, ECDSAPublicKeyResponse, EcdsaCurve, EcdsaKeyId,
-    SignWithECDSAArgs, SignWithECDSAReply,
 };
 use serde::de::DeserializeOwned;
 use std::fmt;
@@ -275,22 +274,27 @@ pub async fn sign_with_ecdsa(
     derivation_path: DerivationPath,
     message_hash: [u8; 32],
 ) -> Result<Vec<u8>, CallError> {
-    const CYCLES_PER_SIGNATURE: u64 = 25_000_000_000;
+    use ic_cdk::api::management_canister::ecdsa::{
+        sign_with_ecdsa, EcdsaCurve, EcdsaKeyId, SignWithEcdsaArgument,
+    };
 
-    let reply: SignWithECDSAReply = call(
-        "sign_with_ecdsa",
-        CYCLES_PER_SIGNATURE,
-        &SignWithECDSAArgs {
-            message_hash,
-            derivation_path,
-            key_id: EcdsaKeyId {
-                curve: EcdsaCurve::Secp256k1,
-                name: key_name.clone(),
-            },
+    let result = sign_with_ecdsa(SignWithEcdsaArgument {
+        message_hash: message_hash.to_vec(),
+        derivation_path: derivation_path.into_inner(),
+        key_id: EcdsaKeyId {
+            curve: EcdsaCurve::Secp256k1,
+            name: key_name.clone(),
         },
-    )
-    .await?;
-    Ok(reply.signature)
+    })
+    .await;
+
+    match result {
+        Ok((reply,)) => Ok(reply.signature),
+        Err((code, msg)) => Err(CallError {
+            method: "sign_with_ecdsa".to_string(),
+            reason: Reason::from_reject(code, msg),
+        }),
+    }
 }
 
 /// Requests alerts for the given UTXO.
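For illustration, a caller can drive the wrapper above as in the following sketch (a hypothetical helper: the key name is a placeholder, and `key_name: String` is the first parameter elided by the hunk context):

    // Illustrative only; not part of this change.
    async fn sign_digest_example(key_name: String, path: DerivationPath, digest: [u8; 32]) -> Vec<u8> {
        sign_with_ecdsa(key_name, path, digest)
            .await
            .unwrap_or_else(|e| ic_cdk::trap(&format!("sign_with_ecdsa failed: {:?}", e)))
    }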
diff --git a/rs/bitcoin/kyt/BUILD.bazel b/rs/bitcoin/kyt/BUILD.bazel
index 80f90a6084f..0a05e9c3ba0 100644
--- a/rs/bitcoin/kyt/BUILD.bazel
+++ b/rs/bitcoin/kyt/BUILD.bazel
@@ -16,6 +16,7 @@ rust_library(
         "@crate_index//:bitcoin_0_32",
         "@crate_index//:candid",
         "@crate_index//:futures",
+        "@crate_index//:ic-btc-interface",
         "@crate_index//:ic-cdk",
         "@crate_index//:serde",
     ],
@@ -28,6 +29,8 @@ rust_test(
         # Keep sorted.
         "@crate_index//:bitcoin_0_32",
         "@crate_index//:candid_parser",
+        "@crate_index//:ic-btc-interface",
+        "@crate_index//:tokio",
     ],
 )
 
@@ -41,6 +44,7 @@ rust_test(
     deps = [
         # Keep sorted.
         "@crate_index//:candid_parser",
+        "@crate_index//:ic-btc-interface",
     ],
 )
 
@@ -58,6 +62,7 @@ rust_canister(
         "@crate_index//:candid",
         "@crate_index//:candid_parser",
         "@crate_index//:futures",
+        "@crate_index//:ic-btc-interface",
         "@crate_index//:ic-cdk",
     ],
 )
@@ -77,6 +82,11 @@ rust_ic_test(
         "//packages/pocket-ic",
         "//rs/pocket_ic_server:pocket-ic-server",
         "//rs/test_utilities/load_wasm",
+        "//rs/types/base_types",
+        "//rs/types/types",
+        "//rs/universal_canister/lib",
         "@crate_index//:candid",
+        "@crate_index//:ic-btc-interface",
+        "@crate_index//:ic-cdk",
     ],
 )
diff --git a/rs/bitcoin/kyt/Cargo.toml b/rs/bitcoin/kyt/Cargo.toml
index 32f10037ecd..a14767dc038 100644
--- a/rs/bitcoin/kyt/Cargo.toml
+++ b/rs/bitcoin/kyt/Cargo.toml
@@ -14,10 +14,15 @@ path = "src/main.rs"
 bitcoin = { version = "0.32.2", default-features = false }
 candid = { workspace = true }
 futures = { workspace = true }
+ic-btc-interface = { workspace = true }
 ic-cdk = { workspace = true }
 serde = { workspace = true }
 
 [dev-dependencies]
 candid_parser = { workspace = true }
+ic-base-types = { path = "../../types/base_types" }
+ic-types = { path = "../../types/types" }
 ic-test-utilities-load-wasm = { path = "../../test_utilities/load_wasm" }
+ic-universal-canister = { path = "../../universal_canister/lib" }
 pocket-ic = { path = "../../../packages/pocket-ic" }
+tokio = { workspace = true }
diff --git a/rs/bitcoin/kyt/btc_kyt_canister.did b/rs/bitcoin/kyt/btc_kyt_canister.did
index 9382df9d6a5..c5f04f8a4c1 100644
--- a/rs/bitcoin/kyt/btc_kyt_canister.did
+++ b/rs/bitcoin/kyt/btc_kyt_canister.did
@@ -1,16 +1,60 @@
-type tx_id = text;
-
-type bitcoin_address = text;
+type BitcoinAddress = text;
 
 type CheckAddressArgs = record {
     // Bitcoin address to be checked.
-    address: bitcoin_address;
+    address: BitcoinAddress;
 };
 
 type CheckAddressResponse = variant { Passed; Failed };
 
+type CheckTransactionArgs = record { txid : blob };
+
+// The result of a check_transaction call.
+type CheckTransactionResponse = variant {
+    // When the check finishes and all input addresses passed KYT.
+    Passed;
+    // When the check finishes and one or more input addresses failed KYT.
+    // The list of failed addresses is returned on a best-effort basis and may be non-exhaustive.
+    Failed: vec BitcoinAddress;
+    // Unknown case where the service is unable to give a final answer of Passed or Failed.
+    // The caller should examine the status and decide how to handle it.
+    Unknown: CheckTransactionStatus;
+};
+
+type CheckTransactionStatus = variant {
+    // The caller should call with a minimum of 40 billion cycles.
+    NotEnoughCycles;
+    // The result is pending, and the caller can call again later.
+    Pending: CheckTransactionPending;
+    // The result is unknown due to an irrecoverable error.
+    Error: CheckTransactionError;
+};
+
+type CheckTransactionPending = variant {
+    // Work is already in progress, and the result is pending.
+    Pending;
+    // The service is experiencing high load.
+    HighLoad;
+    // There was a transient error fetching data.
+    TransientInternalError: text;
+};
+
+type CheckTransactionError = variant {
+    // Response size is too large (>400kB) when fetching transaction data.
+    ResponseTooLarge : record { txid: blob };
+    // Invalid transaction, e.g. an error decoding the transaction or a transaction id mismatch.
+    InvalidTransaction : text;
+};
+
 service : {
-    get_inputs: (tx_id) -> (vec bitcoin_address);
+    // Check input addresses of a transaction matching the given transaction id.
+    // See `CheckTransactionResponse` for more details on the return result.
+    //
+    // The caller should attach at least 40 billion cycles to each call.
+    // The actual cost may be well below that, and unspent cycles will be refunded.
+    // There is also a service charge of 0.1 billion cycles for each call, regardless
+    // of the return result.
+    check_transaction: (CheckTransactionArgs) -> (CheckTransactionResponse);
 
     // Return `Passed` if the given bitcoin address passes the KYT check, or `Failed` otherwise.
     // May throw error (trap) if the given address is malformed or not a mainnet address.
diff --git a/rs/bitcoin/kyt/src/fetch.rs b/rs/bitcoin/kyt/src/fetch.rs
new file mode 100644
index 00000000000..cf335421432
--- /dev/null
+++ b/rs/bitcoin/kyt/src/fetch.rs
@@ -0,0 +1,249 @@
+use crate::state::{FetchGuardError, FetchTxStatus, FetchedTx, HttpGetTxError};
+use crate::types::{
+    CheckTransactionError, CheckTransactionPending, CheckTransactionResponse,
+    CheckTransactionStatus,
+};
+use crate::{blocklist_contains, state};
+use bitcoin::{address::FromScriptError, Address, Network, Transaction};
+use futures::future::try_join_all;
+use ic_btc_interface::Txid;
+use std::convert::Infallible;
+
+#[cfg(test)]
+mod tests;
+
+pub fn get_tx_cycle_cost(max_response_bytes: u32) -> u128 {
+    // 1 KiB for request, max_response_bytes for response
+    49_140_000 + 1024 * 5_200 + 10_400 * (max_response_bytes as u128)
+}
+
+/// Caller of check_transaction must attach this amount of cycles with the call.
+pub const CHECK_TRANSACTION_CYCLES_REQUIRED: u128 = 40_000_000_000;
+
+/// One-time charge for every check_transaction call.
+pub const CHECK_TRANSACTION_CYCLES_SERVICE_FEE: u128 = 100_000_000;
+
+// The max_response_bytes is initially set to 4kB, and then
+// increased to 400kB if the initial size isn't enough.
+// - The maximum size of a standard non-taproot transaction is 400k vBytes.
+// - Taproot transactions could be as big as full block size (4MiB).
+// - Currently a subnet's maximum response size is only 2MiB.
+// - Transaction sizes between 400kB and 2MiB are also uncommon; we could
+//   handle them in the future if required.
+// - Transactions bigger than 2MiB are very rare, and we can't handle them.
+
+/// Initial max response bytes is 4kB
+pub const INITIAL_MAX_RESPONSE_BYTES: u32 = 4 * 1024;
+
+/// Retry max response bytes is 400kB
+pub const RETRY_MAX_RESPONSE_BYTES: u32 = 400 * 1024;
+
+pub enum FetchResult {
+    RetryWithBiggerBuffer,
+    Error(HttpGetTxError),
+    Fetched(FetchedTx),
+}
+
+pub enum TryFetchResult<F> {
+    Pending,
+    HighLoad,
+    Error(HttpGetTxError),
+    NotEnoughCycles,
+    Fetched(FetchedTx),
+    ToFetch(F),
+}
+
+/// Trait that abstracts over system functions like fetching transactions, calculating cycles, etc.
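+///
+/// As a rough, illustrative calculation from `get_tx_cycle_cost` above: an
+/// initial 4 KiB fetch costs 49_140_000 + 1024 * 5_200 + 10_400 * 4_096 =
+/// 97_063_200 cycles (~0.1B), while a 400 KiB retry costs about 4.3B cycles,
+/// both comfortably below the 40B that callers of check_transaction attach.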
+pub trait FetchEnv {
+    type FetchGuard;
+    fn new_fetch_guard(&self, txid: Txid) -> Result<Self::FetchGuard, FetchGuardError>;
+    async fn http_get_tx(
+        &self,
+        txid: Txid,
+        max_response_bytes: u32,
+    ) -> Result<Transaction, HttpGetTxError>;
+    fn cycles_accept(&self, cycles: u128) -> u128;
+    fn cycles_available(&self) -> u128;
+
+    /// Try to fetch a transaction given its txid:
+    /// - If it is already available, return `Fetched`.
+    /// - If it is already pending, return `Pending`.
+    /// - If it is pending retry or not found, return a future that calls `fetch_tx`.
+    /// - Or return other conditions like `HighLoad` or `Error`.
+    fn try_fetch_tx(
+        &self,
+        txid: Txid,
+    ) -> TryFetchResult<impl futures::Future<Output = Result<FetchResult, Infallible>> + '_> {
+        let max_response_bytes = match state::get_fetch_status(txid) {
+            None => INITIAL_MAX_RESPONSE_BYTES,
+            Some(FetchTxStatus::PendingRetry {
+                max_response_bytes, ..
+            }) => max_response_bytes,
+            Some(FetchTxStatus::PendingOutcall { .. }) => return TryFetchResult::Pending,
+            Some(FetchTxStatus::Error(msg)) => return TryFetchResult::Error(msg),
+            Some(FetchTxStatus::Fetched(fetched)) => return TryFetchResult::Fetched(fetched),
+        };
+        let guard = match self.new_fetch_guard(txid) {
+            Ok(guard) => guard,
+            Err(_) => return TryFetchResult::HighLoad,
+        };
+        let cycle_cost = get_tx_cycle_cost(max_response_bytes);
+        if self.cycles_accept(cycle_cost) < cycle_cost {
+            TryFetchResult::NotEnoughCycles
+        } else {
+            TryFetchResult::ToFetch(self.fetch_tx(guard, txid, max_response_bytes))
+        }
+    }
+
+    /// Fetch a transaction using http outcall by its txid and set its status to:
+    /// - `Fetched`, if it is available.
+    /// - `PendingRetry`, if the allocated buffer for the outcall wasn't enough.
+    /// - `Error`, if an irrecoverable error happened during the outcall of `http_get_tx`.
+    ///
+    /// Return the corresponding `FetchResult`.
+    ///
+    /// Note that this function does not return any error, but due to requirements
+    /// of `try_join_all` it must return a `Result` type.
+    async fn fetch_tx(
+        &self,
+        _guard: Self::FetchGuard,
+        txid: Txid,
+        max_response_bytes: u32,
+    ) -> Result<FetchResult, Infallible> {
+        match self.http_get_tx(txid, max_response_bytes).await {
+            Ok(tx) => {
+                let input_addresses = tx.input.iter().map(|_| None).collect();
+                let fetched = FetchedTx {
+                    tx,
+                    input_addresses,
+                };
+                state::set_fetch_status(txid, FetchTxStatus::Fetched(fetched.clone()));
+                Ok(FetchResult::Fetched(fetched))
+            }
+            Err(HttpGetTxError::ResponseTooLarge)
+                if max_response_bytes < RETRY_MAX_RESPONSE_BYTES =>
+            {
+                state::set_fetch_status(
+                    txid,
+                    FetchTxStatus::PendingRetry {
+                        max_response_bytes: RETRY_MAX_RESPONSE_BYTES,
+                    },
+                );
+                Ok(FetchResult::RetryWithBiggerBuffer)
+            }
+            Err(err) => {
+                state::set_fetch_status(txid, FetchTxStatus::Error(err.clone()));
+                Ok(FetchResult::Error(err))
+            }
+        }
+    }
+
+    /// After a transaction is successfully fetched, we still need to fetch
+    /// all of its inputs in order to calculate input addresses. The steps
+    /// are described as follows:
+    /// - Fetch more if there are transaction inputs to be fetched and checked.
+    /// - When they are done, calculate input addresses and record them.
+    /// - For those failed due to insufficient outcall response buffer, mark their status
+    ///   as `PendingRetry`.
+    /// - If we are short of cycles and couldn't fetch all inputs, return `NotEnoughCycles`.
+    /// - When all inputs are fetched, compute their addresses and return `Passed`
+    ///   if all of them pass the check. Otherwise return `Failed`.
+    ///
+    /// Pre-condition: `txid` already exists in state with a `Fetched` status.
+    async fn check_fetched(&self, txid: Txid, fetched: &FetchedTx) -> CheckTransactionResponse {
+        // Return Passed or Failed when all checks are complete, or None otherwise.
+        fn check_completed(fetched: &FetchedTx) -> Option<CheckTransactionResponse> {
+            if fetched.input_addresses.iter().all(|x| x.is_some()) {
+                // We have obtained all input addresses.
+                for address in fetched.input_addresses.iter().flatten() {
+                    if blocklist_contains(address) {
+                        return Some(CheckTransactionResponse::Failed(vec![address.to_string()]));
+                    }
+                }
+                Some(CheckTransactionResponse::Passed)
+            } else {
+                None
+            }
+        }
+
+        if let Some(result) = check_completed(fetched) {
+            return result;
+        }
+
+        let mut futures = vec![];
+        let mut jobs = vec![];
+        for (index, input) in fetched.tx.input.iter().enumerate() {
+            if fetched.input_addresses[index].is_none() {
+                use TryFetchResult::*;
+                let input_txid = Txid::from(*(input.previous_output.txid.as_ref() as &[u8; 32]));
+                match self.try_fetch_tx(input_txid) {
+                    ToFetch(do_fetch) => {
+                        jobs.push((index, input_txid, input.previous_output.vout));
+                        futures.push(do_fetch)
+                    }
+                    Fetched(fetched) => {
+                        let vout = input.previous_output.vout;
+                        match transaction_output_address(&fetched.tx, vout) {
+                            Ok(address) => state::set_fetched_address(txid, index, address),
+                            Err(err) => {
+                                return CheckTransactionError::InvalidTransaction(err.to_string())
+                                    .into()
+                            }
+                        }
+                    }
+                    Pending => continue,
+                    HighLoad | NotEnoughCycles | Error(_) => break,
+                }
+            }
+        }
+
+        if futures.is_empty() {
+            // Return NotEnoughCycles if we have deducted all available cycles
+            if self.cycles_available() == 0 {
+                return CheckTransactionStatus::NotEnoughCycles.into();
+            } else {
+                return CheckTransactionPending::HighLoad.into();
+            }
+        }
+
+        let fetch_results = try_join_all(futures)
+            .await
+            .unwrap_or_else(|err| unreachable!("error in try_join_all {:?}", err));
+
+        let mut error = None;
+        for (i, result) in fetch_results.into_iter().enumerate() {
+            let (index, input_txid, vout) = jobs[i];
+            match result {
+                FetchResult::Fetched(fetched) => {
+                    match transaction_output_address(&fetched.tx, vout) {
+                        Ok(address) => state::set_fetched_address(txid, index, address),
+                        Err(err) => {
+                            error = Some(
+                                CheckTransactionError::InvalidTransaction(format!("{:?}", err))
+                                    .into(),
+                            );
+                        }
+                    }
+                }
+                FetchResult::Error(err) => error = Some((input_txid, err).into()),
+                FetchResult::RetryWithBiggerBuffer => (),
+            }
+        }
+        if let Some(err) = error {
+            return err;
+        }
+        // Check again to see if we have completed
+        match state::get_fetch_status(txid).and_then(|result| match result {
+            FetchTxStatus::Fetched(fetched) => check_completed(&fetched),
+            _ => None,
+        }) {
+            Some(result) => result,
+            None => CheckTransactionPending::Pending.into(),
+        }
+    }
+}
+
+fn transaction_output_address(tx: &Transaction, vout: u32) -> Result<Address, FromScriptError> {
+    let output = &tx.output[vout as usize];
+    Address::from_script(&output.script_pubkey, Network::Bitcoin)
+}
diff --git a/rs/bitcoin/kyt/src/fetch/tests.rs b/rs/bitcoin/kyt/src/fetch/tests.rs
new file mode 100644
index 00000000000..3c98b337cb8
--- /dev/null
+++ b/rs/bitcoin/kyt/src/fetch/tests.rs
@@ -0,0 +1,460 @@
+use super::*;
+use crate::blocklist;
+use bitcoin::{
+    absolute::LockTime, hashes::Hash, transaction::Version, Amount, OutPoint, PubkeyHash,
+    ScriptBuf, Sequence, Transaction, TxIn, TxOut, Witness,
+};
+use ic_cdk::api::call::RejectionCode;
+use std::cell::RefCell;
+use std::collections::VecDeque;
+use std::str::FromStr;
+
+// A mock environment that provides a simulated `http_get_tx` implementation,
+// with mock states and transactions used for testing purposes.
+struct MockEnv {
+    high_load: bool,
+    calls: RefCell<VecDeque<(Txid, u32)>>,
+    replies: RefCell<VecDeque<Result<Transaction, HttpGetTxError>>>,
+    available_cycles: RefCell<u128>,
+    accepted_cycles: RefCell<u128>,
+}
+
+impl FetchEnv for MockEnv {
+    type FetchGuard = ();
+
+    fn new_fetch_guard(&self, _txid: Txid) -> Result<Self::FetchGuard, FetchGuardError> {
+        if self.high_load {
+            Err(FetchGuardError::NoCapacity)
+        } else {
+            Ok(())
+        }
+    }
+    async fn http_get_tx(
+        &self,
+        txid: Txid,
+        max_response_bytes: u32,
+    ) -> Result<Transaction, HttpGetTxError> {
+        self.calls
+            .borrow_mut()
+            .push_back((txid, max_response_bytes));
+        self.replies
+            .borrow_mut()
+            .pop_front()
+            .unwrap_or(Err(HttpGetTxError::Rejected {
+                code: RejectionCode::from(0),
+                message: "no more reply".to_string(),
+            }))
+    }
+    fn cycles_accept(&self, cycles: u128) -> u128 {
+        let mut available = self.available_cycles.borrow_mut();
+        let mut accepted = self.accepted_cycles.borrow_mut();
+        let cycles = cycles.min(*available);
+        *accepted += cycles;
+        *available -= cycles;
+        cycles
+    }
+    fn cycles_available(&self) -> u128 {
+        *self.available_cycles.borrow()
+    }
+}
+
+impl MockEnv {
+    fn new(available_cycles: u128) -> Self {
+        Self {
+            high_load: false,
+            calls: RefCell::new(VecDeque::new()),
+            replies: RefCell::new(VecDeque::new()),
+            available_cycles: RefCell::new(available_cycles),
+            accepted_cycles: RefCell::new(0),
+        }
+    }
+    fn assert_get_tx_call(&self, txid: Txid, max_response_bytes: u32) {
+        assert_eq!(
+            self.calls.borrow_mut().pop_front(),
+            Some((txid, max_response_bytes))
+        )
+    }
+    fn assert_no_more_get_tx_call(&self) {
+        assert_eq!(self.calls.borrow_mut().pop_front(), None)
+    }
+    fn expect_get_tx_with_reply(&self, reply: Result<Transaction, HttpGetTxError>) {
+        self.replies.borrow_mut().push_back(reply)
+    }
+    fn refill_cycles(&self, cycles: u128) {
+        *self.available_cycles.borrow_mut() = cycles;
+    }
+    fn cycles_accepted(&self) -> u128 {
+        *self.accepted_cycles.borrow()
+    }
+}
+
+fn mock_txid(v: u8) -> Txid {
+    Txid::from([v; 32])
+}
+
+fn mock_transaction() -> Transaction {
+    Transaction {
+        version: Version::ONE,
+        lock_time: LockTime::ZERO,
+        input: Vec::new(),
+        output: Vec::new(),
+    }
+}
+
+fn mock_transaction_with_outputs(num_outputs: usize) -> Transaction {
+    let mut tx = mock_transaction();
+    let output = (0..num_outputs)
+        .map(|i| TxOut {
+            value: Amount::ONE_SAT,
+            script_pubkey: ScriptBuf::new_p2pkh(&PubkeyHash::from_slice(&[i as u8; 20]).unwrap()),
+        })
+        .collect();
+    tx.output = output;
+    tx
+}
+
+fn mock_transaction_with_inputs(input_txids: Vec<(Txid, u32)>) -> Transaction {
+    let mut tx = mock_transaction();
+    let input = input_txids
+        .into_iter()
+        .enumerate()
+        .map(|(i, (txid, vout))| TxIn {
+            previous_output: OutPoint {
+                txid: bitcoin::Txid::from_slice(txid.as_ref()).unwrap(),
+                vout,
+            },
+            script_sig: ScriptBuf::from_bytes(vec![i as u8; 32]),
+            sequence: Sequence::ZERO,
+            witness: Witness::new(),
+        })
+        .collect();
+    tx.input = input;
+    tx
+}
+
+#[tokio::test]
+async fn test_mock_env() {
+    // Test cycle mock functions
+    let env = MockEnv::new(CHECK_TRANSACTION_CYCLES_REQUIRED);
+    assert_eq!(
+        env.cycles_accept(CHECK_TRANSACTION_CYCLES_SERVICE_FEE),
+        CHECK_TRANSACTION_CYCLES_SERVICE_FEE
+    );
+    let available = env.cycles_available();
+    assert_eq!(
+        available,
+        CHECK_TRANSACTION_CYCLES_REQUIRED - CHECK_TRANSACTION_CYCLES_SERVICE_FEE
+    );
+    assert_eq!(
+        env.cycles_accept(CHECK_TRANSACTION_CYCLES_REQUIRED),
+        available
+    );
+
+    // Test get_tx mock function
+    let env = MockEnv::new(0);
+    let txid = mock_txid(0);
+    env.expect_get_tx_with_reply(Ok(mock_transaction()));
+    let result = env.http_get_tx(txid,
INITIAL_MAX_RESPONSE_BYTES).await; + assert!(result.is_ok()); + env.assert_get_tx_call(txid, INITIAL_MAX_RESPONSE_BYTES); + env.assert_no_more_get_tx_call(); +} + +#[test] +fn test_try_fetch_tx() { + let mut env = MockEnv::new(CHECK_TRANSACTION_CYCLES_REQUIRED); + let txid_0 = mock_txid(0); + let txid_1 = mock_txid(1); + let txid_2 = mock_txid(2); + + // case Fetched + let fetched_0 = FetchTxStatus::Fetched(FetchedTx { + tx: mock_transaction(), + input_addresses: vec![None], + }); + state::set_fetch_status(txid_0, fetched_0.clone()); + assert!(matches!( + env.try_fetch_tx(txid_0), + TryFetchResult::Fetched(_) + )); + + // case Pending + state::set_fetch_status(txid_1, FetchTxStatus::PendingOutcall); + assert!(matches!(env.try_fetch_tx(txid_1), TryFetchResult::Pending)); + + // case HighLoad + env.high_load = true; + assert!(matches!(env.try_fetch_tx(txid_2), TryFetchResult::HighLoad)); + env.high_load = false; + + // case NotEnoughCycles + assert!(matches!( + MockEnv::new(0).try_fetch_tx(txid_2), + TryFetchResult::NotEnoughCycles + )); + + // case ToFetch + let available = env.cycles_available(); + assert!(state::get_fetch_status(txid_2).is_none()); + assert!(matches!( + env.try_fetch_tx(mock_txid(2)), + TryFetchResult::ToFetch(_) + )); + assert_eq!( + env.cycles_available(), + available - get_tx_cycle_cost(INITIAL_MAX_RESPONSE_BYTES) + ); +} + +#[tokio::test] +async fn test_fetch_tx() { + let env = MockEnv::new(CHECK_TRANSACTION_CYCLES_REQUIRED); + let txid_0 = mock_txid(0); + let txid_1 = mock_txid(1); + let txid_2 = mock_txid(2); + + // case Fetched + let tx_0 = mock_transaction_with_inputs(vec![(txid_1, 0), (txid_2, 1)]); + + env.expect_get_tx_with_reply(Ok(tx_0.clone())); + let result = env.fetch_tx((), txid_0, INITIAL_MAX_RESPONSE_BYTES).await; + assert!(matches!(result, Ok(FetchResult::Fetched(_)))); + assert!(matches!( + state::get_fetch_status(txid_0), + Some(FetchTxStatus::Fetched(_)) + )); + if let Ok(FetchResult::Fetched(fetched)) = result { + assert_eq!(fetched.tx, tx_0); + assert_eq!(fetched.input_addresses, vec![None, None]); + } else { + unreachable!() + } + + // case RetryWithBiggerBuffer + env.expect_get_tx_with_reply(Err(HttpGetTxError::ResponseTooLarge)); + let result = env.fetch_tx((), txid_1, INITIAL_MAX_RESPONSE_BYTES).await; + assert!(matches!(result, Ok(FetchResult::RetryWithBiggerBuffer))); + assert!(matches!( + state::get_fetch_status(txid_1), + Some(FetchTxStatus::PendingRetry { max_response_bytes }) if max_response_bytes == RETRY_MAX_RESPONSE_BYTES)); + + // case Err + env.expect_get_tx_with_reply(Err(HttpGetTxError::TxEncoding( + "failed to decode tx".to_string(), + ))); + let result = env.fetch_tx((), txid_2, INITIAL_MAX_RESPONSE_BYTES).await; + assert!(matches!( + result, + Ok(FetchResult::Error(HttpGetTxError::TxEncoding(_))) + )); + assert!(matches!( + state::get_fetch_status(txid_2), + Some(FetchTxStatus::Error(HttpGetTxError::TxEncoding(_))) + )); +} + +#[tokio::test] +async fn test_check_fetched() { + let mut env = MockEnv::new(CHECK_TRANSACTION_CYCLES_REQUIRED); + let good_address = Address::from_str("12cbQLTFMXRnSzktFkuoG3eHoMeFtpTu3S") + .unwrap() + .assume_checked(); + let bad_address = Address::from_str(blocklist::BTC_ADDRESS_BLOCKLIST[0]) + .unwrap() + .assume_checked(); + + let txid_0 = mock_txid(0); + let txid_1 = mock_txid(1); + let txid_2 = mock_txid(2); + let tx_0 = mock_transaction_with_inputs(vec![(txid_1, 0), (txid_2, 1)]); + let tx_1 = mock_transaction_with_outputs(1); + let tx_2 = mock_transaction_with_outputs(2); + + // case Passed + 
let fetched = FetchedTx { + tx: tx_0.clone(), + input_addresses: vec![Some(good_address.clone())], + }; + state::set_fetch_status(txid_0, FetchTxStatus::Fetched(fetched.clone())); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Passed + )); + // Check accepted cycles + assert_eq!(env.cycles_accepted(), 0); + + // case Failed + let fetched = FetchedTx { + tx: tx_0.clone(), + input_addresses: vec![Some(good_address.clone()), Some(bad_address)], + }; + state::set_fetch_status(txid_0, FetchTxStatus::Fetched(fetched.clone())); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Failed(_) + )); + // Check accepted cycle + assert_eq!(env.cycles_accepted(), 0); + + // case HighLoad + env.high_load = true; + let fetched = FetchedTx { + tx: tx_0.clone(), + input_addresses: vec![Some(good_address), None], + }; + state::set_fetch_status(txid_0, FetchTxStatus::Fetched(fetched.clone())); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Unknown(CheckTransactionStatus::Pending( + CheckTransactionPending::HighLoad + )) + )); + // Check accepted cycle + assert_eq!(env.cycles_accepted(), 0); + env.high_load = false; + + // case NotEnoughCycles + let env = MockEnv::new(get_tx_cycle_cost(INITIAL_MAX_RESPONSE_BYTES) / 2); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Unknown(CheckTransactionStatus::NotEnoughCycles) + )); + // Check available cycles: we deduct all remaining cycles even when they are not enough + assert_eq!(env.cycles_available(), 0); + + // case Pending: need 2 inputs, but only able to get 1 for now + let env = MockEnv::new(get_tx_cycle_cost(INITIAL_MAX_RESPONSE_BYTES) * 3 / 2); + let fetched = FetchedTx { + tx: tx_0.clone(), + input_addresses: vec![None, None], + }; + state::set_fetch_status(txid_0, FetchTxStatus::Fetched(fetched.clone())); + env.expect_get_tx_with_reply(Ok(tx_1.clone())); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Unknown(CheckTransactionStatus::Pending( + CheckTransactionPending::Pending + )) + )); + // Check remaining cycle: we deduct all remaining cycles when they are not enough + assert_eq!(env.cycles_available(), 0); + // Continue to get another one, should pass + env.refill_cycles(get_tx_cycle_cost(INITIAL_MAX_RESPONSE_BYTES)); + env.expect_get_tx_with_reply(Ok(tx_2.clone())); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Passed + )); + // Check remaining cycle + assert_eq!(env.cycles_available(), 0); + + // case Passed: need 2 inputs, and getting both + let env = MockEnv::new(CHECK_TRANSACTION_CYCLES_REQUIRED); + let fetched = FetchedTx { + tx: tx_0.clone(), + input_addresses: vec![None, None], + }; + state::set_fetch_status(txid_0, FetchTxStatus::Fetched(fetched.clone())); + state::clear_fetch_status(txid_1); + state::clear_fetch_status(txid_2); + env.expect_get_tx_with_reply(Ok(tx_1.clone())); + env.expect_get_tx_with_reply(Ok(tx_2.clone())); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Passed + )); + // Check remaining cycle + assert_eq!( + env.cycles_available(), + CHECK_TRANSACTION_CYCLES_REQUIRED - get_tx_cycle_cost(INITIAL_MAX_RESPONSE_BYTES) * 2 + ); + + // case Passed: need 2 inputs, and 1 already exists in cache. 
+ let env = MockEnv::new(CHECK_TRANSACTION_CYCLES_REQUIRED); + let fetched = FetchedTx { + tx: tx_0.clone(), + input_addresses: vec![None, None], + }; + state::set_fetch_status(txid_0, FetchTxStatus::Fetched(fetched.clone())); + state::set_fetch_status( + txid_1, + FetchTxStatus::Fetched(FetchedTx { + tx: tx_1.clone(), + input_addresses: vec![], + }), + ); + state::clear_fetch_status(txid_2); + env.expect_get_tx_with_reply(Ok(tx_2.clone())); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Passed + )); + // Check remaining cycle + assert_eq!( + env.cycles_available(), + CHECK_TRANSACTION_CYCLES_REQUIRED - get_tx_cycle_cost(INITIAL_MAX_RESPONSE_BYTES) + ); + + // case Pending: need 2 input, but 1 of them gives RetryWithBiggerBuffer error. + let env = MockEnv::new(CHECK_TRANSACTION_CYCLES_REQUIRED); + let fetched = FetchedTx { + tx: tx_0.clone(), + input_addresses: vec![None, None], + }; + state::set_fetch_status(txid_0, FetchTxStatus::Fetched(fetched.clone())); + state::clear_fetch_status(txid_1); + state::clear_fetch_status(txid_2); + env.expect_get_tx_with_reply(Ok(tx_1.clone())); + env.expect_get_tx_with_reply(Err(HttpGetTxError::ResponseTooLarge)); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Unknown(CheckTransactionStatus::Pending( + CheckTransactionPending::Pending + )) + )); + // Try again with bigger buffer, should Pass + env.expect_get_tx_with_reply(Ok(tx_2.clone())); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Passed + )); + // Check remaining cycle + assert_eq!( + env.cycles_available(), + CHECK_TRANSACTION_CYCLES_REQUIRED + - get_tx_cycle_cost(INITIAL_MAX_RESPONSE_BYTES) * 2 + - get_tx_cycle_cost(RETRY_MAX_RESPONSE_BYTES) + ); + + // case Error: need 2 input, but 1 of them keeps giving RetryWithBiggerBuffer error. 
+ let env = MockEnv::new(CHECK_TRANSACTION_CYCLES_REQUIRED); + let fetched = FetchedTx { + tx: tx_0.clone(), + input_addresses: vec![None, None], + }; + state::set_fetch_status(txid_0, FetchTxStatus::Fetched(fetched.clone())); + state::clear_fetch_status(txid_1); + state::clear_fetch_status(txid_2); + env.expect_get_tx_with_reply(Ok(tx_1.clone())); + env.expect_get_tx_with_reply(Err(HttpGetTxError::ResponseTooLarge)); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Unknown(CheckTransactionStatus::Pending( + CheckTransactionPending::Pending + )) + )); + // Try again with bigger buffer, still fails + env.expect_get_tx_with_reply(Err(HttpGetTxError::ResponseTooLarge)); + assert!(matches!( + env.check_fetched(txid_0, &fetched).await, + CheckTransactionResponse::Unknown(CheckTransactionStatus::Error( + CheckTransactionError::ResponseTooLarge { txid } + )) if txid_2.as_ref() == txid)); + // Check remaining cycle + assert_eq!( + env.cycles_available(), + CHECK_TRANSACTION_CYCLES_REQUIRED + - get_tx_cycle_cost(INITIAL_MAX_RESPONSE_BYTES) * 2 + - get_tx_cycle_cost(RETRY_MAX_RESPONSE_BYTES) + ); +} diff --git a/rs/bitcoin/kyt/src/lib.rs b/rs/bitcoin/kyt/src/lib.rs index bb2f17a64e7..94bdbf5541d 100644 --- a/rs/bitcoin/kyt/src/lib.rs +++ b/rs/bitcoin/kyt/src/lib.rs @@ -1,9 +1,5 @@ -use bitcoin::{ - address::FromScriptError, - consensus::{encode, Decodable}, - Address, Network, Transaction, -}; -use futures::future::try_join_all; +use bitcoin::{consensus::Decodable, Address, Transaction}; +use ic_btc_interface::Txid; use ic_cdk::api::call::RejectionCode; use ic_cdk::api::management_canister::http_request::{ http_request, CanisterHttpRequestArgument, HttpHeader, HttpMethod, TransformContext, @@ -11,21 +7,31 @@ use ic_cdk::api::management_canister::http_request::{ }; pub mod blocklist; +mod fetch; +mod state; mod types; + +pub use fetch::{ + get_tx_cycle_cost, CHECK_TRANSACTION_CYCLES_REQUIRED, CHECK_TRANSACTION_CYCLES_SERVICE_FEE, + INITIAL_MAX_RESPONSE_BYTES, +}; +use fetch::{FetchEnv, FetchResult, TryFetchResult}; +use state::{FetchGuardError, HttpGetTxError}; pub use types::*; -#[derive(Debug)] -pub enum BitcoinTxError { - Address(FromScriptError), - Encoding(encode::Error), - TxIdMismatch { - expected: String, - decoded: String, - }, - Rejected { - code: RejectionCode, - message: String, - }, +impl From<(Txid, HttpGetTxError)> for CheckTransactionResponse { + fn from((txid, err): (Txid, HttpGetTxError)) -> CheckTransactionResponse { + let txid = txid.as_ref().to_vec(); + match err { + HttpGetTxError::Rejected { message, .. 
} => { + CheckTransactionPending::TransientInternalError(message).into() + } + HttpGetTxError::ResponseTooLarge => { + (CheckTransactionError::ResponseTooLarge { txid }).into() + } + _ => CheckTransactionError::InvalidTransaction(format!("{:?}", err)).into(), + } + } } pub fn blocklist_contains(address: &Address) -> bool { @@ -34,87 +40,129 @@ pub fn blocklist_contains(address: &Address) -> bool { .is_ok() } -pub async fn get_inputs_internal(tx_id: String) -> Result, BitcoinTxError> { - let tx = get_tx(tx_id).await?; +pub fn is_response_too_large(code: &RejectionCode, message: &str) -> bool { + code == &RejectionCode::SysFatal + && (message.contains("size limit") || message.contains("length limit")) +} + +struct KytCanisterEnv; - let mut addresses = vec![]; - let mut futures = vec![]; - let mut vouts = vec![]; +impl FetchEnv for KytCanisterEnv { + type FetchGuard = state::FetchGuard; - for input in tx.input.iter() { - vouts.push(input.previous_output.vout as usize); - futures.push(get_tx(input.previous_output.txid.to_string())); + fn new_fetch_guard(&self, txid: Txid) -> Result { + state::FetchGuard::new(txid) } - let input_txs = try_join_all(futures).await?; - for (index, input_tx) in input_txs.iter().enumerate() { - let output = &input_tx.output[vouts[index]]; - let address = Address::from_script(&output.script_pubkey, Network::Bitcoin) - .map_err(BitcoinTxError::Address)?; - addresses.push(address.to_string()); + async fn http_get_tx( + &self, + txid: Txid, + max_response_bytes: u32, + ) -> Result { + // TODO(XC-159): Support multiple providers + let host = "btcscan.org"; + let url = format!("https://{}/api/tx/{}/raw", host, txid); + let request_headers = vec![ + HttpHeader { + name: "Host".to_string(), + value: format!("{host}:443"), + }, + HttpHeader { + name: "User-Agent".to_string(), + value: "bitcoin_inputs_collector".to_string(), + }, + ]; + let request = CanisterHttpRequestArgument { + url: url.to_string(), + method: HttpMethod::GET, + body: None, + max_response_bytes: Some(max_response_bytes as u64), + transform: Some(TransformContext { + function: TransformFunc(candid::Func { + principal: ic_cdk::api::id(), + method: "transform".to_string(), + }), + context: vec![], + }), + headers: request_headers, + }; + let cycles = get_tx_cycle_cost(max_response_bytes); + match http_request(request, cycles).await { + Ok((response,)) => { + // Ensure response is 200 before decoding + if response.status != 200u32 { + let code = if response.status == 429u32 { + RejectionCode::SysTransient + } else { + RejectionCode::SysFatal + }; + return Err(HttpGetTxError::Rejected { + code, + message: format!("HTTP GET {} received code {}", url, response.status), + }); + } + let tx = Transaction::consensus_decode(&mut response.body.as_slice()) + .map_err(|err| HttpGetTxError::TxEncoding(err.to_string()))?; + // Verify the correctness of the transaction by recomputing the transaction ID. + let decoded_txid = tx.compute_txid(); + if decoded_txid.as_ref() as &[u8; 32] != txid.as_ref() { + return Err(HttpGetTxError::TxidMismatch { + expected: txid, + decoded: Txid::from(*(decoded_txid.as_ref() as &[u8; 32])), + }); + } + Ok(tx) + } + Err((r, m)) if is_response_too_large(&r, &m) => Err(HttpGetTxError::ResponseTooLarge), + Err((r, m)) => { + // TODO(XC-158): maybe try other providers and also log the error. + println!("The http_request resulted into error. 
RejectionCode: {r:?}, Error: {m}"); + Err(HttpGetTxError::Rejected { + code: r, + message: m, + }) + } + } } - Ok(addresses) + fn cycles_accept(&self, cycles: u128) -> u128 { + ic_cdk::api::call::msg_cycles_accept128(cycles) + } + fn cycles_available(&self) -> u128 { + ic_cdk::api::call::msg_cycles_available128() + } } -async fn get_tx(tx_id: String) -> Result { - // TODO(XC-159): Support multiple providers - let host = "btcscan.org"; - let url = format!("https://{}/api/tx/{}/raw", host, tx_id); - let request_headers = vec![ - HttpHeader { - name: "Host".to_string(), - value: format!("{host}:443"), - }, - HttpHeader { - name: "User-Agent".to_string(), - value: "bitcoin_inputs_collector".to_string(), - }, - ]; - // The max_response_bytes is set to 400KiB because: - // - The maximum size of a standard non-taproot transaction is 400k vBytes. - // - Taproot transactions could be as big as full block size (4MiB). - // - Currently a subnet's maximum response size is only 2MiB. - // - Transactions bigger than 2MiB are very rare. - // - // TODO(XC-171): Transactions between 400k and 2MiB are uncommon but may need to be handled. - let request = CanisterHttpRequestArgument { - url: url.to_string(), - method: HttpMethod::GET, - body: None, - max_response_bytes: Some(400 * 1024), // 400 KiB - transform: Some(TransformContext { - function: TransformFunc(candid::Func { - principal: ic_cdk::api::id(), - method: "transform".to_string(), - }), - context: vec![], - }), - headers: request_headers, - }; - let cycles = 49_140_000 + 1024 * 5_200 + 10_400 * 400 * 1024; // 1 KiB request, 400 KiB response - match http_request(request, cycles).await { - Ok((response,)) => { - // TODO(XC-158): ensure response is 200 before decoding - let tx = Transaction::consensus_decode(&mut response.body.as_slice()) - .map_err(BitcoinTxError::Encoding)?; - // Verify the correctness of the transaction by recomputing the transaction ID. - let decoded_tx_id = tx.compute_txid().to_string(); - if decoded_tx_id != tx_id { - return Err(BitcoinTxError::TxIdMismatch { - expected: tx_id, - decoded: decoded_tx_id, - }); +/// Check the input addresses of a transaction given its txid. +/// It consists of the following steps: +/// +/// 1. Call `try_fetch_tx` to find if the transaction has already +/// been fetched, or if another fetch is already pending, or +/// if we are experiencing high load, or we need to retry the +/// fetch (when the previous max_response_bytes setting wasn't +/// enough). +/// +/// 2. If we need to fetch this tranction, call the function `do_fetch`. +/// +/// 3. For fetched transaction, call `check_fetched`, which further +/// checks if the input addresses of this transaction are available. +/// If not, we need to additionally fetch those input transactions +/// in order to compute their corresponding addresses. 
+pub async fn check_transaction_inputs(txid: Txid) -> CheckTransactionResponse { + let env = &KytCanisterEnv; + match env.try_fetch_tx(txid) { + TryFetchResult::Pending => CheckTransactionPending::Pending.into(), + TryFetchResult::HighLoad => CheckTransactionPending::HighLoad.into(), + TryFetchResult::Error(err) => (txid, err).into(), + TryFetchResult::NotEnoughCycles => CheckTransactionStatus::NotEnoughCycles.into(), + TryFetchResult::Fetched(fetched) => env.check_fetched(txid, &fetched).await, + TryFetchResult::ToFetch(do_fetch) => { + match do_fetch.await { + Ok(FetchResult::Fetched(fetched)) => env.check_fetched(txid, &fetched).await, + Ok(FetchResult::Error(err)) => (txid, err).into(), + Ok(FetchResult::RetryWithBiggerBuffer) => CheckTransactionPending::Pending.into(), + Err(_) => unreachable!(), // should never happen } - Ok(tx) - } - Err((r, m)) => { - // TODO(XC-158): maybe try other providers and also log the error. - println!("The http_request resulted into error. RejectionCode: {r:?}, Error: {m}"); - Err(BitcoinTxError::Rejected { - code: r, - message: m, - }) } } } diff --git a/rs/bitcoin/kyt/src/main.rs b/rs/bitcoin/kyt/src/main.rs index 5b8ce0ec377..dc301fa5502 100644 --- a/rs/bitcoin/kyt/src/main.rs +++ b/rs/bitcoin/kyt/src/main.rs @@ -1,19 +1,13 @@ use bitcoin::{Address, Network}; -use ic_btc_kyt::{blocklist_contains, get_inputs_internal, CheckAddressArgs, CheckAddressResponse}; +use ic_btc_interface::Txid; +use ic_btc_kyt::{ + blocklist_contains, check_transaction_inputs, CheckAddressArgs, CheckAddressResponse, + CheckTransactionArgs, CheckTransactionError, CheckTransactionResponse, CheckTransactionStatus, + CHECK_TRANSACTION_CYCLES_REQUIRED, CHECK_TRANSACTION_CYCLES_SERVICE_FEE, +}; use ic_cdk::api::management_canister::http_request::{HttpResponse, TransformArgs}; use std::str::FromStr; -#[ic_cdk::update] -/// The function returns the Bitcoin addresses of the inputs in the -/// transaction with the given transaction ID. -async fn get_inputs(tx_id: String) -> Vec { - // TODO(XC-157): Charge cycles and also add guards. - match get_inputs_internal(tx_id).await { - Ok(inputs) => inputs, - Err(err) => panic!("Error in getting transaction inputs: {:?}", err), - } -} - #[ic_cdk::query] /// Return `Passed` if the given bitcion address passed the KYT check, or /// `Failed` otherwise. @@ -31,6 +25,42 @@ fn check_address(args: CheckAddressArgs) -> CheckAddressResponse { } } +#[ic_cdk::update] +/// Return `Passed` if all input addresses of the transaction of the given +/// transaction id passed the KYT check, or `Failed` if any of them did not. +/// +/// Every call to check_transaction must attach at least `CHECK_TRANSACTION_CYCLES_REQUIRED`. +/// Return `NotEnoughCycles` if not enough cycles are attached. +/// +/// The actual cycle cost may be well less than `CHECK_TRANSACTION_CYCLES_REQUIRED`, and +/// unspent cycles will be refunded back to the caller, minus a +/// `CHECK_TRANSACTION_CYCLES_SERVICE_FEE`, which is always deducted regardless. +/// +/// In certain cases, it may also return `HighLoad` or `Pending` to indicate the +/// caller needs to call again (with at least `CHECK_TRANSACTION_CYCLES_REQUIRED` cycles) +/// in order to get the result. +/// +/// If a permanent error occurred in the process, e.g, when a transaction data +/// fails to decode or its transaction id does not match, then `Error` is returned +/// together with a text description. 
+async fn check_transaction(args: CheckTransactionArgs) -> CheckTransactionResponse { + ic_cdk::api::call::msg_cycles_accept128(CHECK_TRANSACTION_CYCLES_SERVICE_FEE); + match Txid::try_from(args.txid.as_ref()) { + Ok(txid) => { + if ic_cdk::api::call::msg_cycles_available128() + .checked_add(CHECK_TRANSACTION_CYCLES_SERVICE_FEE) + .unwrap() + < CHECK_TRANSACTION_CYCLES_REQUIRED + { + CheckTransactionStatus::NotEnoughCycles.into() + } else { + check_transaction_inputs(txid).await + } + } + Err(err) => CheckTransactionError::InvalidTransaction(err.to_string()).into(), + } +} + #[ic_cdk::query(hidden = true)] fn transform(raw: TransformArgs) -> HttpResponse { HttpResponse { @@ -55,7 +85,7 @@ fn check_candid_interface_compatibility() { .join("btc_kyt_canister.did"); service_equal( - CandidSource::Text(&new_interface), + CandidSource::Text(dbg!(&new_interface)), CandidSource::File(old_interface.as_path()), ) .unwrap(); diff --git a/rs/bitcoin/kyt/src/state.rs b/rs/bitcoin/kyt/src/state.rs new file mode 100644 index 00000000000..2eef46b725e --- /dev/null +++ b/rs/bitcoin/kyt/src/state.rs @@ -0,0 +1,124 @@ +use bitcoin::{Address, Transaction}; +use ic_btc_interface::Txid; +use ic_cdk::api::call::RejectionCode; +use std::cell::RefCell; +use std::collections::BTreeMap; + +#[cfg(test)] +mod tests; + +/// Error returned by calling `http_get_tx`. +#[derive(Debug, Clone)] +pub enum HttpGetTxError { + TxEncoding(String), + TxidMismatch { + expected: Txid, + decoded: Txid, + }, + ResponseTooLarge, + Rejected { + code: RejectionCode, + message: String, + }, +} + +/// We store in state the `FetchStatus` for every `Txid` we fetch. +/// It transitions from `PendingOutcall` to any one of the three +/// possible outcomes: `PendingRetry`, `Error`, or `Fetched`. +#[derive(Debug, Clone)] +pub enum FetchTxStatus { + PendingOutcall, + PendingRetry { max_response_bytes: u32 }, + Error(HttpGetTxError), + Fetched(FetchedTx), +} + +/// Once the transaction data is successfully fetched, we create +/// a list of `input_addresses` (matching the number of inputs) +/// that is initialized as `None`. Once a corresponding input +/// transaction is fetched (see function `check_fetched`), its +/// input address will be computed and filled in. +#[derive(Debug, Clone)] +pub struct FetchedTx { + pub tx: Transaction, + pub input_addresses: Vec>, +} + +// Max number of concurrent http outcalls. +const MAX_CONCURRENT: u32 = 50; + +// The internal KYT state includes: +// 1. Outcall capacity, a semaphore limiting max concurrent outcalls. +// 2. fetch transaction status, indexed by transaction id. +// +// TODO(XC-191): persist canister state +thread_local! { + static OUTCALL_CAPACITY: RefCell = const { RefCell::new(MAX_CONCURRENT) }; + static FETCH_TX_STATUS: RefCell> = RefCell::new(BTreeMap::default()); +} + +pub fn get_fetch_status(txid: Txid) -> Option { + FETCH_TX_STATUS.with(|s| s.borrow().get(&txid).cloned()) +} + +pub fn set_fetch_status(txid: Txid, status: FetchTxStatus) { + FETCH_TX_STATUS.with(|s| { + let _ = s.borrow_mut().insert(txid, status); + }) +} + +pub fn clear_fetch_status(txid: Txid) { + FETCH_TX_STATUS.with(|s| { + let _ = s.borrow_mut().remove(&txid); + }) +} + +/// Set the address at the given `index` in the `Fetched` status of the given `txid`. +/// Pre-condition: the status of `txid` is `Fetched`, and `index` is within bounds. 
+pub fn set_fetched_address(txid: Txid, index: usize, address: Address) { + FETCH_TX_STATUS.with(|s| { + s.borrow_mut().entry(txid).and_modify(|status| { + if let FetchTxStatus::Fetched(fetched) = status { + fetched.input_addresses[index] = Some(address); + }; + }); + }) +} + +#[derive(Eq, PartialEq, Debug)] +pub struct FetchGuard(Txid); + +#[derive(Debug)] +pub enum FetchGuardError { + NoCapacity, +} + +impl FetchGuard { + pub fn new(txid: Txid) -> Result { + let guard = OUTCALL_CAPACITY.with(|capacity| { + let mut capacity = capacity.borrow_mut(); + if *capacity > 0 { + *capacity -= 1; + Ok(FetchGuard(txid)) + } else { + Err(FetchGuardError::NoCapacity) + } + })?; + set_fetch_status(txid, FetchTxStatus::PendingOutcall); + Ok(guard) + } +} + +impl Drop for FetchGuard { + fn drop(&mut self) { + OUTCALL_CAPACITY.with(|capacity| { + let mut capacity = capacity.borrow_mut(); + *capacity += 1; + }); + let txid = self.0; + if let Some(FetchTxStatus::PendingOutcall) = get_fetch_status(txid) { + // Only clear the status when it is still `PendingOutcall` + clear_fetch_status(txid); + } + } +} diff --git a/rs/bitcoin/kyt/src/state/tests.rs b/rs/bitcoin/kyt/src/state/tests.rs new file mode 100644 index 00000000000..41201ac4fff --- /dev/null +++ b/rs/bitcoin/kyt/src/state/tests.rs @@ -0,0 +1,80 @@ +use super::*; + +#[test] +fn test_fetch_guard() { + use super::*; + let txid = Txid::from([0u8; 32]); + let max = MAX_CONCURRENT; + { + let _guard = FetchGuard::new(txid).unwrap(); + assert_eq!(OUTCALL_CAPACITY.with(|c| *c.borrow()), max - 1); + assert!(matches!( + get_fetch_status(txid), + Some(FetchTxStatus::PendingOutcall) + )); + } + assert!(get_fetch_status(txid).is_none()); + assert_eq!(OUTCALL_CAPACITY.with(|c| *c.borrow()), max); + + { + let mut guards = Vec::new(); + for i in 0..max { + assert_eq!(OUTCALL_CAPACITY.with(|c| *c.borrow()), max - i); + let txid = Txid::from([(i + 1) as u8; 32]); + guards.push(FetchGuard::new(txid).unwrap()); + } + assert!(FetchGuard::new(txid).is_err()); + assert_eq!(OUTCALL_CAPACITY.with(|c| *c.borrow()), 0); + } + assert_eq!(OUTCALL_CAPACITY.with(|c| *c.borrow()), max); +} + +#[test] +fn test_fetch_status() { + let txid_0 = Txid::from([0u8; 32]); + assert!(get_fetch_status(txid_0).is_none()); + set_fetch_status(txid_0, FetchTxStatus::PendingOutcall); + assert!(matches!( + get_fetch_status(txid_0), + Some(FetchTxStatus::PendingOutcall) + )); + + let bytes = b"\ +\x02\x00\x00\x00\x01\x17\x34\x3a\xab\xa9\x67\x67\x2f\x17\xef\x0a\xbf\x4b\xb1\x14\xad\x19\x63\xe0\ +\x7d\xd2\xf2\x05\xaa\x25\xa4\xda\x50\x3e\xdb\x01\xab\x01\x00\x00\x00\x6a\x47\x30\x44\x02\x20\x21\ +\x81\xb5\x9c\xa7\xed\x7e\x2c\x8e\x06\x96\x52\xb0\x7e\xd2\x10\x24\x9e\x83\x37\xec\xc5\x35\xca\x6b\ +\x75\x3c\x02\x44\x89\xe4\x5d\x02\x20\x2a\xc7\x55\xcb\x55\x97\xf1\xcc\x2c\xad\x32\xb8\xa4\x33\xf1\ +\x79\x6b\x5f\x51\x76\x71\x6d\xa9\x22\x2c\x65\xf9\x44\xaf\xd1\x3d\xa8\x01\x21\x02\xc4\xc6\x9e\x4d\ +\x36\x4b\x3e\xdf\x84\xb5\x20\xa0\x18\xd5\x7e\x71\xfa\xce\x19\x7e\xc8\xf9\x46\x43\x60\x7e\x4a\xca\ +\x70\xdc\x82\xc1\xfd\xff\xff\xff\x02\x10\x27\x00\x00\x00\x00\x00\x00\x19\x76\xa9\x14\x11\xb3\x66\ +\xed\xfc\x0a\x8b\x66\xfe\xeb\xae\x5c\x2e\x25\xa7\xb6\xa5\xd1\xcf\x31\x88\xac\x7c\x2e\x00\x00\x00\ +\x00\x00\x00\x19\x76\xa9\x14\xb9\x73\x68\xd8\xbf\x0a\x37\x69\x00\x85\x16\x57\xf3\x7f\xbe\x73\xa6\ +\x56\x61\x33\x88\xac\x14\xa4\x0c\x00"; + use bitcoin::consensus::Decodable; + use bitcoin::Network; + let tx = Transaction::consensus_decode(&mut bytes.to_vec().as_slice()).unwrap(); + let txid_1 = 
Txid::from(*(tx.compute_txid().as_ref() as &[u8; 32])); + set_fetch_status( + txid_1, + FetchTxStatus::Fetched(FetchedTx { + tx: tx.clone(), + input_addresses: vec![None; 2], + }), + ); + assert!(matches!( + get_fetch_status(txid_1), + Some(FetchTxStatus::Fetched(_)) + )); + let address = Address::from_script(&tx.output[0].script_pubkey, Network::Bitcoin).unwrap(); + set_fetched_address(txid_1, 0, address.clone()); + match get_fetch_status(txid_1) { + Some(FetchTxStatus::Fetched(fetched)) => { + assert_eq!(fetched.input_addresses[0], Some(address)) + } + _ => { + panic!("txid {} is not found", txid_1) + } + } + clear_fetch_status(txid_1); + assert!(get_fetch_status(txid_1).is_none()); +} diff --git a/rs/bitcoin/kyt/src/types.rs b/rs/bitcoin/kyt/src/types.rs index 3abab50fdcc..2085094042d 100644 --- a/rs/bitcoin/kyt/src/types.rs +++ b/rs/bitcoin/kyt/src/types.rs @@ -12,3 +12,66 @@ pub enum CheckAddressResponse { Passed, Failed, } + +#[derive(CandidType, Debug, Deserialize, Serialize)] +pub struct CheckTransactionArgs { + pub txid: Vec, +} + +#[derive(CandidType, Debug, Deserialize, Serialize)] +pub enum CheckTransactionResponse { + /// When check finishes and all input addresses passed KYT. + Passed, + /// When check finishes and one or more input addresses failed KYT. + /// The list of failed addresses are returned as a best effort, which may be non-exhaustive. + Failed(Vec), + /// Unknown case where it is unable to give a final answer of Passed or Failed. + /// The caller should examine the status and decide how to handle it. + Unknown(CheckTransactionStatus), +} + +#[derive(CandidType, Debug, Deserialize, Serialize)] +pub enum CheckTransactionStatus { + /// Caller should call with a minimum of `CHECK_TRANSACTION_CYCLES_REQUIRED` cycles. + NotEnoughCycles, + /// The result is pending, and the caller can call again later. + Pending(CheckTransactionPending), + /// The result is unknown due to an irrecoverable error. + Error(CheckTransactionError), +} + +#[derive(CandidType, Debug, Deserialize, Serialize)] +pub enum CheckTransactionPending { + /// Work is already in progress, and the result is pending. + Pending, + /// The service is experience high load. + HighLoad, + /// There was a transient error fetching data. + TransientInternalError(String), +} + +#[derive(CandidType, Debug, Deserialize, Serialize)] +pub enum CheckTransactionError { + /// Response size is too large (> `RETRY_MAX_RESPONSE_BYTES`) when fetching the transaction data of a txid. + ResponseTooLarge { txid: Vec }, + /// Invalid transaction, e.g. error decoding transaction or transaction id mismatch, etc. 
+ InvalidTransaction(String), +} + +impl From for CheckTransactionResponse { + fn from(err: CheckTransactionError) -> CheckTransactionResponse { + CheckTransactionResponse::Unknown(CheckTransactionStatus::Error(err)) + } +} + +impl From for CheckTransactionResponse { + fn from(pending: CheckTransactionPending) -> CheckTransactionResponse { + CheckTransactionResponse::Unknown(CheckTransactionStatus::Pending(pending)) + } +} + +impl From for CheckTransactionResponse { + fn from(status: CheckTransactionStatus) -> CheckTransactionResponse { + CheckTransactionResponse::Unknown(status) + } +} diff --git a/rs/bitcoin/kyt/tests/tests.rs b/rs/bitcoin/kyt/tests/tests.rs index 92650a9f6a9..e7ce558826b 100644 --- a/rs/bitcoin/kyt/tests/tests.rs +++ b/rs/bitcoin/kyt/tests/tests.rs @@ -1,14 +1,46 @@ -use candid::{Decode, Encode, Principal}; +use candid::{decode_one, CandidType, Deserialize, Encode, Principal}; +use ic_base_types::PrincipalId; +use ic_btc_interface::Txid; +use ic_btc_kyt::{ + blocklist, get_tx_cycle_cost, CheckAddressArgs, CheckAddressResponse, CheckTransactionArgs, + CheckTransactionError, CheckTransactionPending, CheckTransactionResponse, + CheckTransactionStatus, CHECK_TRANSACTION_CYCLES_REQUIRED, + CHECK_TRANSACTION_CYCLES_SERVICE_FEE, INITIAL_MAX_RESPONSE_BYTES, +}; use ic_test_utilities_load_wasm::load_wasm; +use ic_types::Cycles; +use ic_universal_canister::{call_args, wasm, UNIVERSAL_CANISTER_WASM}; use pocket_ic::{ common::rest::{ - CanisterHttpReply, CanisterHttpRequest, CanisterHttpResponse, MockCanisterHttpResponse, + CanisterHttpHeader, CanisterHttpReply, CanisterHttpRequest, CanisterHttpResponse, + MockCanisterHttpResponse, RawMessageId, }, - query_candid, PocketIc, WasmResult, + query_candid, PocketIc, UserError, WasmResult, }; +use std::str::FromStr; const MAX_TICKS: usize = 10; +// Because we use universal_canister to make calls with attached cycles to +// `check_transaction`, the actual_cost would be greater than expected_cost +// by a small margin. Namely, the universal_canister itself would consume +// some cycle for decoding args and sending the call. +// +// The number 7_000_000 is obtained empirically by running tests with pocket-ic +// and checking the actual consumptions. It is both big enough to allow tests to +// succeed, and small enough not to interfere with the expected cycle cost we +// are testing for. +const UNIVERSAL_CANISTER_CYCLE_MARGIN: u128 = 7_000_000; + +struct Setup { + // Owner of canisters created for the setup. + controller: Principal, + // The `caller` canister helps to proxy update calls with cycle payment. 
+ caller: Principal, + kyt_canister: Principal, + env: PocketIc, +} + fn kyt_wasm() -> Vec { load_wasm( std::env::var("CARGO_MANIFEST_DIR").unwrap(), @@ -17,26 +49,71 @@ fn kyt_wasm() -> Vec { ) } -fn setup_env() -> (Principal, PocketIc) { - let env = PocketIc::new(); +impl Setup { + fn new() -> Setup { + let controller = PrincipalId::new_user_test_id(1).0; + let env = PocketIc::new(); + + let caller = env.create_canister_with_settings(Some(controller), None); + env.add_cycles(caller, 100_000_000_000_000); + env.install_canister( + caller, + UNIVERSAL_CANISTER_WASM.to_vec(), + vec![], + Some(controller), + ); + + let kyt_canister = env.create_canister(); + env.add_cycles(kyt_canister, 100_000_000_000_000); + env.install_canister(kyt_canister, kyt_wasm(), vec![], None); + + Setup { + controller, + caller, + kyt_canister, + env, + } + } + + fn submit_kyt_call( + &self, + method: &str, + args: Vec, + cycles: u128, + ) -> Result { + let payload = wasm() + .call_with_cycles( + PrincipalId(self.kyt_canister), + method, + call_args() + .other_side(args) + .on_reject(wasm().reject_message().reject()), + Cycles::new(cycles), + ) + .build(); + self.env + .submit_call(self.caller, self.controller, "update", payload) + } +} - let kyt = env.create_canister(); - env.add_cycles(kyt, 100_000_000_000_000); - env.install_canister(kyt, kyt_wasm(), vec![], None); - (kyt, env) +fn decode<'a, T: CandidType + Deserialize<'a>>(result: &'a WasmResult) -> T { + match result { + WasmResult::Reply(bytes) => decode_one(bytes).unwrap(), + WasmResult::Reject(msg) => panic!("unexpected reject: {}", msg), + } } #[test] fn test_check_address() { - use ic_btc_kyt::{blocklist, CheckAddressArgs, CheckAddressResponse}; - - let (kyt, env) = setup_env(); + let Setup { + kyt_canister, env, .. 
+ } = Setup::new(); // Choose an address from the blocklist let blocklist_len = blocklist::BTC_ADDRESS_BLOCKLIST.len(); let result = query_candid( &env, - kyt, + kyt_canister, "check_address", (CheckAddressArgs { address: blocklist::BTC_ADDRESS_BLOCKLIST[blocklist_len / 2].to_string(), @@ -51,7 +128,7 @@ fn test_check_address() { // Satoshi's address hopefully is not in the blocklist let result = query_candid( &env, - kyt, + kyt_canister, "check_address", (CheckAddressArgs { address: "12cbQLTFMXRnSzktFkuoG3eHoMeFtpTu3S".to_string(), @@ -66,7 +143,7 @@ fn test_check_address() { // Test with an malformed address let result = query_candid::<_, (CheckAddressResponse,)>( &env, - kyt, + kyt_canister, "check_address", (CheckAddressArgs { address: "not an address".to_string(), @@ -77,7 +154,7 @@ fn test_check_address() { // Test with an testnet address let result = query_candid::<_, (CheckAddressResponse,)>( &env, - kyt, + kyt_canister, "check_address", (CheckAddressArgs { address: "n47QBape2PcisN2mkHR2YnhqoBr56iPhJh".to_string(), @@ -87,21 +164,23 @@ fn test_check_address() { } #[test] -fn test_get_inputs() { - let (kyt, env) = setup_env(); - let sender = Principal::anonymous(); - - let call_id = env - .submit_call( - kyt, - sender, - "get_inputs", - Encode!( - &"c80763842edc9a697a2114517cf0c138c5403a761ef63cfad1fa6993fa3475ed".to_string() - ) +fn test_check_transaction_passed() { + let setup = Setup::new(); + let cycles_before = setup.env.cycle_balance(setup.caller); + + let txid = + Txid::from_str("c80763842edc9a697a2114517cf0c138c5403a761ef63cfad1fa6993fa3475ed").unwrap(); + let call_id = setup + .submit_kyt_call( + "check_transaction", + Encode!(&CheckTransactionArgs { + txid: txid.as_ref().to_vec() + }) .unwrap(), + CHECK_TRANSACTION_CYCLES_REQUIRED, ) .expect("submit_call failed to return call id"); + let env = &setup.env; // The response body used for testing below is generated from the output of // @@ -110,14 +189,8 @@ fn test_get_inputs() { // There wll be two outcalls because the canister will first fetch the above // given txid, and then fetch the vout[0] from the returned transaction body. 
- let canister_http_requests = tick_until_next_request(&env); - env.mock_canister_http_response(MockCanisterHttpResponse { - subnet_id: canister_http_requests[0].subnet_id, - request_id: canister_http_requests[0].request_id, - response: CanisterHttpResponse::CanisterHttpReply(CanisterHttpReply { - status: 200, - headers: vec![], - body: b"\ + let canister_http_requests = tick_until_next_request(env); + let body = b"\ \x02\x00\x00\x00\x01\x17\x34\x3a\xab\xa9\x67\x67\x2f\x17\xef\x0a\xbf\x4b\xb1\x14\xad\x19\x63\xe0\ \x7d\xd2\xf2\x05\xaa\x25\xa4\xda\x50\x3e\xdb\x01\xab\x01\x00\x00\x00\x6a\x47\x30\x44\x02\x20\x21\ \x81\xb5\x9c\xa7\xed\x7e\x2c\x8e\x06\x96\x52\xb0\x7e\xd2\x10\x24\x9e\x83\x37\xec\xc5\x35\xca\x6b\ @@ -128,19 +201,20 @@ fn test_get_inputs() { \xed\xfc\x0a\x8b\x66\xfe\xeb\xae\x5c\x2e\x25\xa7\xb6\xa5\xd1\xcf\x31\x88\xac\x7c\x2e\x00\x00\x00\ \x00\x00\x00\x19\x76\xa9\x14\xb9\x73\x68\xd8\xbf\x0a\x37\x69\x00\x85\x16\x57\xf3\x7f\xbe\x73\xa6\ \x56\x61\x33\x88\xac\x14\xa4\x0c\x00" - .to_vec(), - }), - additional_responses: vec![], - }); - - let canister_http_requests = tick_until_next_request(&env); + .to_vec(); env.mock_canister_http_response(MockCanisterHttpResponse { subnet_id: canister_http_requests[0].subnet_id, request_id: canister_http_requests[0].request_id, response: CanisterHttpResponse::CanisterHttpReply(CanisterHttpReply { status: 200, headers: vec![], - body: b"\ + body, + }), + additional_responses: vec![], + }); + + let canister_http_requests = tick_until_next_request(env); + let body = b"\ \x02\x00\x00\x00\x01\x82\xc8\x5d\xe7\x4d\x19\xbb\x36\x16\x2f\xca\xef\xc7\xe7\x70\x15\x65\xb0\x2d\ \xf6\x06\x0f\x8e\xcf\x49\x64\x63\x37\xfc\xe8\x59\x37\x07\x00\x00\x00\x6a\x47\x30\x44\x02\x20\x15\ \xf2\xc7\x7a\x3b\x95\x13\x73\x7a\xa2\x86\xb3\xe6\x06\xf9\xb6\x82\x1c\x6d\x5d\x35\xe5\xa9\x58\xe0\ @@ -151,25 +225,146 @@ fn test_get_inputs() { \xb1\x5c\xbf\x27\xd5\x42\x53\x99\xeb\xf6\xf0\xfb\x50\xeb\xb8\x8f\x18\x88\xac\x00\x96\x00\x00\x00\ \x00\x00\x00\x19\x76\xa9\x14\xb9\x73\x68\xd8\xbf\x0a\x37\x69\x00\x85\x16\x57\xf3\x7f\xbe\x73\xa6\ \x56\x61\x33\x88\xac\xb3\xa3\x0c\x00" - .to_vec(), + .to_vec(); + env.mock_canister_http_response(MockCanisterHttpResponse { + subnet_id: canister_http_requests[0].subnet_id, + request_id: canister_http_requests[0].request_id, + response: CanisterHttpResponse::CanisterHttpReply(CanisterHttpReply { + status: 200, + headers: vec![], + body: body.clone(), }), - additional_responses: vec![], + // Fill additional responses with different headers to test if the transform + // function does its job by clearing the headers. 
+ additional_responses: (1..13) + .map(|i| { + CanisterHttpResponse::CanisterHttpReply(CanisterHttpReply { + status: 200, + headers: vec![CanisterHttpHeader { + name: format!("name-{}", i), + value: format!("{}", i), + }], + body: body.clone(), + }) + }) + .collect(), }); let result = env .await_call(call_id) .expect("the fetch request didn't finish"); - match &result { - WasmResult::Reply(bytes) => { - let response = Decode!(bytes, Vec).unwrap(); - assert_eq!( - response, - vec!["1HuaGHYa4rjqpcqkm1thE48s88rf1QpR5J".to_string()], - ); - } - WasmResult::Reject(msg) => panic!("unexpected reject: {}", msg), - } + assert!(matches!( + decode::(&result), + CheckTransactionResponse::Passed + )); + + let cycles_after = env.cycle_balance(setup.caller); + let expected_cost = + CHECK_TRANSACTION_CYCLES_SERVICE_FEE + 2 * get_tx_cycle_cost(INITIAL_MAX_RESPONSE_BYTES); + let actual_cost = cycles_before - cycles_after; + assert!(actual_cost > expected_cost); + assert!(actual_cost - expected_cost < UNIVERSAL_CANISTER_CYCLE_MARGIN); +} + +#[test] +fn test_check_transaction_error() { + let setup = Setup::new(); + let cycles_before = setup.env.cycle_balance(setup.caller); + let mut txid = + Txid::from_str("a80763842edc9a697a2114517cf0c138c5403a761ef63cfad1fa6993fa3475ed") + .unwrap() + .as_ref() + .to_vec(); + + // Test for cycles not enough + let call_id = setup + .submit_kyt_call( + "check_transaction", + Encode!(&CheckTransactionArgs { txid: txid.clone() }).unwrap(), + CHECK_TRANSACTION_CYCLES_REQUIRED - 1, + ) + .expect("submit_call failed to return call id"); + let result = setup + .env + .await_call(call_id) + .expect("the fetch request didn't finish"); + assert!(matches!( + decode::(&result), + CheckTransactionResponse::Unknown(CheckTransactionStatus::NotEnoughCycles), + )); + + let cycles_after = setup.env.cycle_balance(setup.caller); + let expected_cost = CHECK_TRANSACTION_CYCLES_SERVICE_FEE; + let actual_cost = cycles_before - cycles_after; + assert!(actual_cost > expected_cost); + assert!(actual_cost - expected_cost < UNIVERSAL_CANISTER_CYCLE_MARGIN); + + // Test for 500 error + let cycles_before = setup.env.cycle_balance(setup.caller); + let call_id = setup + .submit_kyt_call( + "check_transaction", + Encode!(&CheckTransactionArgs { txid: txid.clone() }).unwrap(), + CHECK_TRANSACTION_CYCLES_REQUIRED, + ) + .expect("submit_call failed to return call id"); + let canister_http_requests = tick_until_next_request(&setup.env); + setup + .env + .mock_canister_http_response(MockCanisterHttpResponse { + subnet_id: canister_http_requests[0].subnet_id, + request_id: canister_http_requests[0].request_id, + response: CanisterHttpResponse::CanisterHttpReply(CanisterHttpReply { + status: 500, + headers: vec![], + body: vec![], + }), + additional_responses: vec![], + }); + let result = setup + .env + .await_call(call_id) + .expect("the fetch request didn't finish"); + assert!(matches!( + dbg!(decode::(&result)), + CheckTransactionResponse::Unknown(CheckTransactionStatus::Pending( + CheckTransactionPending::TransientInternalError(_) + )) + )); + let cycles_after = setup.env.cycle_balance(setup.caller); + let expected_cost = + CHECK_TRANSACTION_CYCLES_SERVICE_FEE + get_tx_cycle_cost(INITIAL_MAX_RESPONSE_BYTES); + let actual_cost = cycles_before - cycles_after; + assert!(actual_cost > expected_cost); + assert!(actual_cost - expected_cost < UNIVERSAL_CANISTER_CYCLE_MARGIN); + + // Test for malformatted txid + let cycles_before = setup.env.cycle_balance(setup.caller); + txid.pop(); + let call_id = setup + 
.submit_kyt_call( + "check_transaction", + Encode!(&CheckTransactionArgs { txid }).unwrap(), + CHECK_TRANSACTION_CYCLES_REQUIRED, + ) + .expect("submit_call failed to return call id"); + let result = setup + .env + .await_call(call_id) + .expect("the fetch request didn't finish"); + assert!(matches!( + decode::(&result), + CheckTransactionResponse::Unknown(CheckTransactionStatus::Error( + CheckTransactionError::InvalidTransaction(_) + )) + )); + + let cycles_after = setup.env.cycle_balance(setup.caller); + let expected_cost = CHECK_TRANSACTION_CYCLES_SERVICE_FEE; + let actual_cost = cycles_before - cycles_after; + assert!(actual_cost > expected_cost); + assert!(actual_cost - expected_cost < UNIVERSAL_CANISTER_CYCLE_MARGIN); } fn tick_until_next_request(env: &PocketIc) -> Vec { @@ -182,8 +377,9 @@ fn tick_until_next_request(env: &PocketIc) -> Vec { let canister_http_requests = env.get_canister_http(); assert!( !canister_http_requests.is_empty(), - "The canister did not produce another request in {} ticks", - MAX_TICKS + "The canister did not produce another request in {} ticks {:?}", + MAX_TICKS, + canister_http_requests ); canister_http_requests } diff --git a/rs/cross-chain/proposal-cli/BUILD.bazel b/rs/cross-chain/proposal-cli/BUILD.bazel index 3da543db894..8ecc9763dad 100644 --- a/rs/cross-chain/proposal-cli/BUILD.bazel +++ b/rs/cross-chain/proposal-cli/BUILD.bazel @@ -10,6 +10,7 @@ DEPENDENCIES = [ "@crate_index//:reqwest", "@crate_index//:serde", "@crate_index//:serde_json", + "@crate_index//:sha2", "@crate_index//:strum", "@crate_index//:tempfile", "@crate_index//:tokio", diff --git a/rs/cross-chain/proposal-cli/Cargo.toml b/rs/cross-chain/proposal-cli/Cargo.toml index eeb2edbc9b1..8509f2b50e2 100644 --- a/rs/cross-chain/proposal-cli/Cargo.toml +++ b/rs/cross-chain/proposal-cli/Cargo.toml @@ -15,6 +15,7 @@ hex = { workspace = true } reqwest = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } +sha2 = { workspace = true } strum = { workspace = true } strum_macros = { workspace = true } tempfile = { workspace = true } diff --git a/rs/cross-chain/proposal-cli/src/candid/mod.rs b/rs/cross-chain/proposal-cli/src/candid/mod.rs index 55182073d81..63107eb4125 100644 --- a/rs/cross-chain/proposal-cli/src/candid/mod.rs +++ b/rs/cross-chain/proposal-cli/src/candid/mod.rs @@ -1,6 +1,7 @@ #[cfg(test)] mod tests; +use crate::git::ArgsHash; use candid::types::{Type, TypeInner}; use candid::TypeEnv; use std::path::Path; @@ -12,6 +13,7 @@ pub struct UpgradeArgs { constructor_types: Vec, upgrade_args: String, encoded_upgrade_args: Vec, + args_sha256: ArgsHash, candid_file_name: String, } @@ -24,6 +26,10 @@ impl UpgradeArgs { hex::encode(&self.encoded_upgrade_args) } + pub fn args_sha256_hex(&self) -> String { + self.args_sha256.to_string() + } + pub fn didc_encode_cmd(&self) -> String { if self.upgrade_args != EMPTY_UPGRADE_ARGS { format!( @@ -49,6 +55,7 @@ pub fn encode_upgrade_args>(candid_file: &Path, upgrade_args: F) .expect("fail to parse upgrade args") .to_bytes_with_types(&env, &upgrade_types) .expect("failed to encode"); + let args_sha256 = ArgsHash::sha256(&encoded_upgrade_args); let candid_file_name = candid_file .file_name() .expect("missing file name") @@ -58,6 +65,7 @@ pub fn encode_upgrade_args>(candid_file: &Path, upgrade_args: F) constructor_types: upgrade_types, upgrade_args, encoded_upgrade_args, + args_sha256, candid_file_name, } } diff --git a/rs/cross-chain/proposal-cli/src/git/mod.rs b/rs/cross-chain/proposal-cli/src/git/mod.rs index 
dc46bd0734c..6e71249920b 100644 --- a/rs/cross-chain/proposal-cli/src/git/mod.rs +++ b/rs/cross-chain/proposal-cli/src/git/mod.rs @@ -37,8 +37,18 @@ impl Display for Hash { } } +impl Hash<32> { + pub fn sha256(data: &[u8]) -> Self { + use sha2::Digest; + let mut hasher = sha2::Sha256::new(); + hasher.update(data); + Self(hasher.finalize().into()) + } +} + pub type GitCommitHash = Hash<20>; pub type CompressedWasmHash = Hash<32>; +pub type ArgsHash = Hash<32>; #[derive(Debug)] pub struct GitRepository { diff --git a/rs/cross-chain/proposal-cli/src/ic_admin/mod.rs b/rs/cross-chain/proposal-cli/src/ic_admin/mod.rs index 717e7a586b7..3227a874d9f 100644 --- a/rs/cross-chain/proposal-cli/src/ic_admin/mod.rs +++ b/rs/cross-chain/proposal-cli/src/ic_admin/mod.rs @@ -62,6 +62,7 @@ impl IcAdminArgs { wasm_module_path: generated_files.wasm.to_string_lossy().to_string(), wasm_module_sha256: proposal.compressed_wasm_hash().clone(), arg: generated_files.arg.to_string_lossy().to_string(), + arg_sha256: proposal.args_sha256_hex(), summary_file: generated_files.summary.to_string_lossy().to_string(), } .render() @@ -111,6 +112,9 @@ pub struct IcAdminTemplate { /// canister. pub arg: String, + /// The sha256 of the arg binary file. + pub arg_sha256: String, + /// A file containing a human-readable summary of the proposal content. pub summary_file: String, } diff --git a/rs/cross-chain/proposal-cli/src/ic_admin/tests.rs b/rs/cross-chain/proposal-cli/src/ic_admin/tests.rs index fe183fc8b97..3ced232f400 100644 --- a/rs/cross-chain/proposal-cli/src/ic_admin/tests.rs +++ b/rs/cross-chain/proposal-cli/src/ic_admin/tests.rs @@ -95,6 +95,7 @@ fn ic_admin_template() -> IcAdminTemplate { .parse() .unwrap(), arg: "arg".to_string(), + arg_sha256: "1111111111111111111111111111111111111111111111111111111111111111".to_string(), summary_file: "summary.md".to_string(), } } diff --git a/rs/cross-chain/proposal-cli/src/proposal/mod.rs b/rs/cross-chain/proposal-cli/src/proposal/mod.rs index 961b63aff90..34e95a33f37 100644 --- a/rs/cross-chain/proposal-cli/src/proposal/mod.rs +++ b/rs/cross-chain/proposal-cli/src/proposal/mod.rs @@ -61,6 +61,13 @@ impl ProposalTemplate { .expect("failed to write hex args"); } + pub fn args_sha256_hex(&self) -> String { + match self { + ProposalTemplate::Upgrade(template) => template.upgrade_args.args_sha256_hex(), + ProposalTemplate::Install(template) => template.install_args.args_sha256_hex(), + } + } + pub fn render(&self) -> String { match self { ProposalTemplate::Upgrade(template) => template.render(), diff --git a/rs/cross-chain/proposal-cli/templates/submit_with_ic_admin.shx b/rs/cross-chain/proposal-cli/templates/submit_with_ic_admin.shx index 7062234744d..a2eb5590a49 100644 --- a/rs/cross-chain/proposal-cli/templates/submit_with_ic_admin.shx +++ b/rs/cross-chain/proposal-cli/templates/submit_with_ic_admin.shx @@ -29,4 +29,5 @@ ic-admin \ --wasm-module-path "{{wasm_module_path}}" \ --wasm-module-sha256 {{wasm_module_sha256}} \ --arg "{{arg}}" \ + --arg-sha256 {{arg_sha256}} \ --summary-file "{{summary_file}}" diff --git a/rs/ethereum/cketh/minter/src/management.rs b/rs/ethereum/cketh/minter/src/management.rs index 967009c8f71..6fda8ffc746 100644 --- a/rs/ethereum/cketh/minter/src/management.rs +++ b/rs/ethereum/cketh/minter/src/management.rs @@ -1,9 +1,5 @@ -use candid::{CandidType, Principal}; use ic_cdk::api::call::RejectionCode; -use ic_management_canister_types::{ - DerivationPath, EcdsaCurve, EcdsaKeyId, SignWithECDSAArgs, SignWithECDSAReply, -}; -use serde::de::DeserializeOwned; 
+use ic_management_canister_types::DerivationPath; use std::fmt; /// Represents an error from a management canister call, such as @@ -83,63 +79,39 @@ impl Reason { } } -async fn call(method: &str, payment: u64, input: &I) -> Result -where - I: CandidType, - O: CandidType + DeserializeOwned, -{ - let balance = ic_cdk::api::canister_balance128(); - if balance < payment as u128 { - return Err(CallError { - method: method.to_string(), - reason: Reason::OutOfCycles, - }); - } - - let res: Result<(O,), _> = ic_cdk::api::call::call_with_payment( - Principal::management_canister(), - method, - (input,), - payment, - ) - .await; - - match res { - Ok((output,)) => Ok(output), - Err((code, msg)) => Err(CallError { - method: method.to_string(), - reason: Reason::from_reject(code, msg), - }), - } -} - /// Signs a message hash using the tECDSA API. pub async fn sign_with_ecdsa( key_name: String, derivation_path: DerivationPath, message_hash: [u8; 32], ) -> Result<[u8; 64], CallError> { - const CYCLES_PER_SIGNATURE: u64 = 25_000_000_000; + use ic_cdk::api::management_canister::ecdsa::{ + sign_with_ecdsa, EcdsaCurve, EcdsaKeyId, SignWithEcdsaArgument, + }; - let reply: SignWithECDSAReply = call( - "sign_with_ecdsa", - CYCLES_PER_SIGNATURE, - &SignWithECDSAArgs { - message_hash, - derivation_path, - key_id: EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: key_name.clone(), - }, + let result = sign_with_ecdsa(SignWithEcdsaArgument { + message_hash: message_hash.to_vec(), + derivation_path: derivation_path.into_inner(), + key_id: EcdsaKeyId { + curve: EcdsaCurve::Secp256k1, + name: key_name.clone(), }, - ) - .await?; + }) + .await; - let signature_length = reply.signature.len(); - Ok(<[u8; 64]>::try_from(reply.signature).unwrap_or_else(|_| { - panic!( - "BUG: invalid signature from management canister. Expected 64 bytes but got {} bytes", - signature_length - ) - })) + match result { + Ok((reply,)) => { + let signature_length = reply.signature.len(); + Ok(<[u8; 64]>::try_from(reply.signature).unwrap_or_else(|_| { + panic!( + "BUG: invalid signature from management canister. Expected 64 bytes but got {} bytes", + signature_length + ) + })) + } + Err((code, msg)) => Err(CallError { + method: "sign_with_ecdsa".to_string(), + reason: Reason::from_reject(code, msg), + }), + } } diff --git a/rs/execution_environment/src/execution/response.rs b/rs/execution_environment/src/execution/response.rs index 424a515e710..71ebb03812f 100644 --- a/rs/execution_environment/src/execution/response.rs +++ b/rs/execution_environment/src/execution/response.rs @@ -452,12 +452,6 @@ impl ResponseHelper { round.counters.charging_from_balance_error, ); - if let Some(state_changes) = &canister_state_changes { - let requested = state_changes.system_state_changes.removed_cycles(); - // A cleanup callback cannot accept and send cycles. - assert_eq!(requested.get(), 0); - } - apply_canister_state_changes( canister_state_changes, self.canister.execution_state.as_mut().unwrap(), diff --git a/rs/ic_os/config/BUILD.bazel b/rs/ic_os/config/BUILD.bazel index e90ec656252..2438720360d 100644 --- a/rs/ic_os/config/BUILD.bazel +++ b/rs/ic_os/config/BUILD.bazel @@ -1,20 +1,54 @@ -load("@rules_rust//rust:defs.bzl", "rust_library") +load("@rules_rust//rust:defs.bzl", "rust_binary", "rust_library", "rust_test") package(default_visibility = ["//rs:ic-os-pkg"]) DEPENDENCIES = [ # Keep sorted. 
+ "//rs/ic_os/utils", + "//rs/types/types", "@crate_index//:anyhow", + "@crate_index//:clap", + "@crate_index//:regex", + "@crate_index//:serde", + "@crate_index//:serde_json", + "@crate_index//:serde_with", + "@crate_index//:url", ] +DEV_DEPENDENCIES = [ + # Keep sorted. + "@crate_index//:once_cell", + "@crate_index//:tempfile", +] + +MACRO_DEPENDENCIES = [] + +ALIASES = {} + rust_library( - name = "config", + name = "config_lib", srcs = glob( ["src/**/*.rs"], + exclude = ["src/main.rs"], ), - aliases = {}, crate_name = "config", edition = "2021", - proc_macro_deps = [], deps = DEPENDENCIES, ) + +rust_binary( + name = "config", + srcs = ["src/main.rs"], + aliases = ALIASES, + crate_name = "config", + edition = "2021", + proc_macro_deps = MACRO_DEPENDENCIES, + deps = [":config_lib"] + DEPENDENCIES, +) + +rust_test( + name = "config_lib_test", + crate = ":config_lib", + # You may add other deps that are specific to the test configuration + deps = DEV_DEPENDENCIES, +) diff --git a/rs/ic_os/config/Cargo.toml b/rs/ic_os/config/Cargo.toml index fcc53eb1e5e..811e805c465 100644 --- a/rs/ic_os/config/Cargo.toml +++ b/rs/ic_os/config/Cargo.toml @@ -5,3 +5,23 @@ edition = "2021" [dependencies] anyhow = { workspace = true } +ic-types = { path = "../../types/types" } +clap = { workspace = true } +utils = { path = "../utils" } +url = { workspace = true } +serde_json = { workspace = true } +serde = { workspace = true } +serde_with = "1.6.2" +regex = { workspace = true } + +[dev-dependencies] +once_cell = "1.8" +tempfile = { workspace = true } + +[lib] +name = "config" +path = "src/lib.rs" + +[[bin]] +name = "config" +path = "src/main.rs" diff --git a/rs/ic_os/config/README.md b/rs/ic_os/config/README.md new file mode 100644 index 00000000000..53567a8c9f6 --- /dev/null +++ b/rs/ic_os/config/README.md @@ -0,0 +1,11 @@ +# IC-OS Config + +IC-OS Config is responsible for managing the configuration of IC-OS images. + +SetupOS transforms user-facing configuration files (like `config.ini`, `deployment.json`, etc.) into a SetupOSConfig struct. Then, in production, configuration is propagated from SetupOS → HostOS → GuestOS (→ replica) via the HostOSConfig and GuestOSConfig structures. + +All access to configuration and the config partition should go through the config structures. + +For testing, IC-OS Config is also used to create HostOS and GuestOS configuration directly. + +For details on the IC-OS configuration mechanism, refer to [ic-os/docs/Configuration.adoc](../../../ic-os/docs/Configuration.adoc) \ No newline at end of file diff --git a/rs/ic_os/config/src/config_ini.rs b/rs/ic_os/config/src/config_ini.rs new file mode 100644 index 00000000000..d90a77d95be --- /dev/null +++ b/rs/ic_os/config/src/config_ini.rs @@ -0,0 +1,334 @@ +use regex::Regex; +use std::collections::HashMap; +use std::fs::read_to_string; +use std::net::{Ipv4Addr, Ipv6Addr}; +use std::path::Path; + +use anyhow::bail; +use anyhow::{Context, Result}; + +pub type ConfigMap = HashMap; +pub struct ConfigIniSettings { + pub ipv6_prefix: Option, + pub ipv6_address: Option, + pub ipv6_prefix_length: u8, + pub ipv6_gateway: Ipv6Addr, + pub ipv4_address: Option, + pub ipv4_gateway: Option, + pub ipv4_prefix_length: Option, + pub domain: Option, + pub verbose: bool, +} + +// Prefix should have a max length of 19 ("1234:6789:1234:6789") +// It could have fewer characters though. Parsing as an ip address with trailing '::' should work. 
+fn is_valid_ipv6_prefix(ipv6_prefix: &str) -> bool { + ipv6_prefix.len() <= 19 && format!("{ipv6_prefix}::").parse::().is_ok() +} + +pub fn get_config_ini_settings(config_file_path: &Path) -> Result { + let config_map: ConfigMap = config_map_from_path(config_file_path)?; + + let ipv6_prefix = config_map + .get("ipv6_prefix") + .map(|prefix| { + if !is_valid_ipv6_prefix(prefix) { + bail!("Invalid ipv6 prefix: {}", prefix); + } + Ok(prefix.clone()) + }) + .transpose()?; + + // Per PFOPS - ipv6_prefix_length will always be 64 + let ipv6_prefix_length = 64_u8; + + // Optional ipv6_address - for testing. Takes precedence over ipv6_prefix. + let ipv6_address = config_map + .get("ipv6_address") + .map(|address| { + // ipv6_address might be formatted with the trailing suffix. Remove it. + address + .strip_suffix(&format!("/{}", ipv6_prefix_length)) + .unwrap_or(address) + .parse::() + .context(format!("Invalid IPv6 address: {}", address)) + }) + .transpose()?; + + if ipv6_address.is_none() && ipv6_prefix.is_none() { + bail!("Missing config parameter: need at least one of ipv6_prefix or ipv6_address"); + } + + let ipv6_gateway = config_map + .get("ipv6_gateway") + .context("Missing config parameter: ipv6_gateway")? + .parse::() + .context("Invalid IPv6 gateway address")?; + + let ipv4_address = config_map + .get("ipv4_address") + .map(|address| { + address + .parse::() + .context(format!("Invalid IPv4 address: {}", address)) + }) + .transpose()?; + + let ipv4_gateway = config_map + .get("ipv4_gateway") + .map(|address| { + address + .parse::() + .context(format!("Invalid IPv4 gateway: {}", address)) + }) + .transpose()?; + + let ipv4_prefix_length = config_map + .get("ipv4_prefix_length") + .map(|prefix| { + let prefix = prefix + .parse::() + .context(format!("Invalid IPv4 prefix length: {}", prefix))?; + if prefix > 32 { + bail!( + "IPv4 prefix length must be between 0 and 32, got {}", + prefix + ); + } + Ok(prefix) + }) + .transpose()?; + + let domain = config_map.get("domain").cloned(); + + let verbose = config_map + .get("verbose") + .is_some_and(|s| s.eq_ignore_ascii_case("true")); + + Ok(ConfigIniSettings { + ipv6_prefix, + ipv6_address, + ipv6_prefix_length, + ipv6_gateway, + ipv4_address, + ipv4_gateway, + ipv4_prefix_length, + domain, + verbose, + }) +} + +fn parse_config_line(line: &str) -> Option<(String, String)> { + // Skip blank lines and comments + if line.is_empty() || line.trim().starts_with('#') { + return None; + } + + let parts: Vec<&str> = line.splitn(2, '=').collect(); + if parts.len() == 2 { + Some((parts[0].trim().into(), parts[1].trim().into())) + } else { + eprintln!("Warning: skipping config line due to unrecognized format: \"{line}\""); + eprintln!("Expected format: \"=\""); + None + } +} + +pub fn config_map_from_path(config_file_path: &Path) -> Result { + let file_contents = read_to_string(config_file_path) + .with_context(|| format!("Error reading file: {}", config_file_path.display()))?; + + let normalized_file_contents = normalize_contents(&file_contents); + + Ok(normalized_file_contents + .lines() + .filter_map(parse_config_line) + .collect()) +} + +fn normalize_contents(contents: &str) -> String { + let mut normalized_contents = contents.replace("\r\n", "\n").replace("\r", "\n"); + + let comment_regex = Regex::new(r"#.*$").unwrap(); + normalized_contents = comment_regex + .replace_all(&normalized_contents, "") + .to_string(); + + normalized_contents = normalized_contents.replace("\"", "").replace("'", ""); + + normalized_contents = 
normalized_contents.to_lowercase(); + + let empty_line_regex = Regex::new(r"^\s*$\n?").unwrap(); + normalized_contents = empty_line_regex + .replace_all(&normalized_contents, "") + .to_string(); + + if !normalized_contents.ends_with('\n') { + normalized_contents.push('\n'); + } + + normalized_contents +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::Write; + use tempfile::NamedTempFile; + + #[test] + fn test_is_valid_ipv6_prefix() { + // Valid prefixes + assert!(is_valid_ipv6_prefix("2a00:1111:1111:1111")); + assert!(is_valid_ipv6_prefix("2a00:111:11:11")); + assert!(is_valid_ipv6_prefix("2602:fb2b:100:10")); + + // Invalid prefixes + assert!(!is_valid_ipv6_prefix("2a00:1111:1111:1111:")); // Trailing colon + assert!(!is_valid_ipv6_prefix("2a00:1111:1111:1111:1111:1111")); // Too long + assert!(!is_valid_ipv6_prefix("abcd::1234:5678")); // Contains "::" + } + + #[test] + fn test_parse_config_line() { + assert_eq!( + parse_config_line("key=value"), + Some(("key".to_string(), "value".to_string())) + ); + assert_eq!( + parse_config_line(" key = value "), + Some(("key".to_string(), "value".to_string())) + ); + assert_eq!(parse_config_line(""), None); + assert_eq!(parse_config_line("# this is a comment"), None); + assert_eq!(parse_config_line("keywithoutvalue"), None); + assert_eq!( + parse_config_line("key=value=extra"), + Some(("key".to_string(), "value=extra".to_string())) + ); + } + + #[test] + fn test_config_map_from_path() -> Result<()> { + let mut temp_file = NamedTempFile::new()?; + let file_path = temp_file.path().to_path_buf(); + + writeln!(temp_file, "key1=value1")?; + writeln!(temp_file, "key2=value2")?; + writeln!(temp_file, "# This is a comment")?; + writeln!(temp_file, "key3=value3")?; + writeln!(temp_file)?; + + let config_map = config_map_from_path(&file_path)?; + + assert_eq!(config_map.get("key1"), Some(&"value1".to_string())); + assert_eq!(config_map.get("key2"), Some(&"value2".to_string())); + assert_eq!(config_map.get("key3"), Some(&"value3".to_string())); + assert_eq!(config_map.get("bad_key"), None); + + Ok(()) + } + + #[test] + fn test_config_map_from_path_crlf() -> Result<()> { + let mut temp_file = NamedTempFile::new()?; + let file_path = temp_file.path().to_path_buf(); + + writeln!(temp_file, "key4=value4\r\nkey5=value5\r\n")?; + + let config_map = config_map_from_path(&file_path)?; + + assert_eq!(config_map.get("key4"), Some(&"value4".to_string())); + assert_eq!(config_map.get("key5"), Some(&"value5".to_string())); + assert_eq!(config_map.get("bad_key"), None); + + Ok(()) + } + + #[test] + fn test_get_config_ini_settings() -> Result<()> { + // Test valid config.ini + let mut temp_file = NamedTempFile::new()?; + writeln!(temp_file, "\n\t\r")?; + writeln!(temp_file, "# COMMENT ")?; + writeln!(temp_file, "BAD INPUT ")?; + writeln!(temp_file, "\n\n\n\n")?; + writeln!(temp_file, "ipv6_prefix=2a00:fb01:400:200")?; + writeln!(temp_file, "ipv6_address=2a00:fb01:400:200::/64")?; + writeln!(temp_file, "ipv6_gateway=2a00:fb01:400:200::1")?; + writeln!(temp_file, "ipv4_address=212.71.124.178")?; + writeln!(temp_file, "ipv4_gateway=212.71.124.177")?; + writeln!(temp_file, "ipv4_prefix_length=28")?; + writeln!(temp_file, "domain=example.com")?; + writeln!(temp_file, "verbose=false")?; + + let temp_file_path = temp_file.path(); + + let config_ini_settings = get_config_ini_settings(temp_file_path)?; + + assert_eq!( + config_ini_settings.ipv6_prefix.unwrap(), + "2a00:fb01:400:200".to_string() + ); + assert_eq!( + config_ini_settings.ipv6_address.unwrap(), + 
"2a00:fb01:400:200::".parse::()? + ); + assert_eq!( + config_ini_settings.ipv6_gateway, + "2a00:fb01:400:200::1".parse::()? + ); + assert_eq!(config_ini_settings.ipv6_prefix_length, 64); + assert_eq!( + config_ini_settings.ipv4_address.unwrap(), + "212.71.124.178".parse::()? + ); + assert_eq!( + config_ini_settings.ipv4_gateway.unwrap(), + "212.71.124.177".parse::()? + ); + assert_eq!(config_ini_settings.ipv4_prefix_length.unwrap(), 28); + assert_eq!(config_ini_settings.domain, Some("example.com".to_string())); + assert!(!config_ini_settings.verbose); + + // Test ipv6_address without ipv6_prefix_length length + let mut temp_file = NamedTempFile::new()?; + writeln!(temp_file, "ipv6_address=2a00:fb01:400:200::")?; + let config_ini_settings = get_config_ini_settings(temp_file_path)?; + assert_eq!( + config_ini_settings.ipv6_address.unwrap(), + "2a00:fb01:400:200::".parse::()? + ); + assert_eq!(config_ini_settings.ipv6_prefix_length, 64); + + // Test missing ipv6 + let mut temp_file = NamedTempFile::new()?; + writeln!(temp_file, "ipv4_address=212.71.124.178")?; + writeln!(temp_file, "ipv4_gateway=212.71.124.177")?; + writeln!(temp_file, "ipv4_prefix_length=28")?; + + let temp_file_path = temp_file.path(); + let result = get_config_ini_settings(temp_file_path); + assert!(result.is_err()); + + // Test invalid IPv6 address + let mut temp_file = NamedTempFile::new()?; + writeln!(temp_file, "ipv6_prefix=invalid_ipv6_prefix")?; + writeln!(temp_file, "ipv6_gateway=2001:db8:85a3:0000::1")?; + writeln!(temp_file, "ipv4_address=192.168.1.1")?; + writeln!(temp_file, "ipv4_gateway=192.168.1.254")?; + writeln!(temp_file, "ipv4_prefix_length=24")?; + + let temp_file_path = temp_file.path(); + let result = get_config_ini_settings(temp_file_path); + assert!(result.is_err()); + + // Test missing prefix and address + let mut temp_file = NamedTempFile::new()?; + writeln!(temp_file, "ipv6_gateway=2001:db8:85a3:0000::1")?; + let result = get_config_ini_settings(temp_file_path); + assert!(result.is_err()); + + Ok(()) + } +} diff --git a/rs/ic_os/utils/src/deployment.rs b/rs/ic_os/config/src/deployment_json.rs similarity index 85% rename from rs/ic_os/utils/src/deployment.rs rename to rs/ic_os/config/src/deployment_json.rs index e8092aed20f..94c411f4068 100644 --- a/rs/ic_os/utils/src/deployment.rs +++ b/rs/ic_os/config/src/deployment_json.rs @@ -7,7 +7,7 @@ use serde_with::{serde_as, DisplayFromStr}; use url::Url; #[derive(PartialEq, Debug, Deserialize, Serialize)] -pub struct DeploymentJson { +pub struct DeploymentSettings { pub deployment: Deployment, pub logging: Logging, pub nns: Nns, @@ -42,7 +42,7 @@ pub struct Resources { pub cpu: Option, } -pub fn read_deployment_file(deployment_json: &Path) -> Result { +pub fn get_deployment_settings(deployment_json: &Path) -> Result { let file = File::open(deployment_json).context("failed to open deployment config file")?; serde_json::from_reader(&file).context("Invalid json content") } @@ -119,7 +119,7 @@ mod test { } }"#; - static DEPLOYMENT_STRUCT: Lazy = Lazy::new(|| { + static DEPLOYMENT_STRUCT: Lazy = Lazy::new(|| { let hosts = [ "elasticsearch-node-0.mercury.dfinity.systems:443", "elasticsearch-node-1.mercury.dfinity.systems:443", @@ -127,7 +127,7 @@ mod test { "elasticsearch-node-3.mercury.dfinity.systems:443", ] .join(" "); - DeploymentJson { + DeploymentSettings { deployment: Deployment { name: "mainnet".to_string(), mgmt_mac: None, @@ -159,7 +159,7 @@ mod test { } }"#; - static DEPLOYMENT_STRUCT_NO_MGMT_MAC: Lazy = Lazy::new(|| { + static 
DEPLOYMENT_STRUCT_NO_MGMT_MAC: Lazy = Lazy::new(|| { let hosts = [ "elasticsearch-node-0.mercury.dfinity.systems:443", "elasticsearch-node-1.mercury.dfinity.systems:443", @@ -167,7 +167,7 @@ mod test { "elasticsearch-node-3.mercury.dfinity.systems:443", ] .join(" "); - DeploymentJson { + DeploymentSettings { deployment: Deployment { name: "mainnet".to_string(), mgmt_mac: None, @@ -198,7 +198,7 @@ mod test { } }"#; - static DEPLOYMENT_STRUCT_NO_CPU_NO_MGMT_MAC: Lazy = Lazy::new(|| { + static DEPLOYMENT_STRUCT_NO_CPU_NO_MGMT_MAC: Lazy = Lazy::new(|| { let hosts = [ "elasticsearch-node-0.mercury.dfinity.systems:443", "elasticsearch-node-1.mercury.dfinity.systems:443", @@ -206,7 +206,7 @@ mod test { "elasticsearch-node-3.mercury.dfinity.systems:443", ] .join(" "); - DeploymentJson { + DeploymentSettings { deployment: Deployment { name: "mainnet".to_string(), mgmt_mac: None, @@ -238,7 +238,7 @@ mod test { } }"#; - static QEMU_CPU_DEPLOYMENT_STRUCT: Lazy = Lazy::new(|| { + static QEMU_CPU_DEPLOYMENT_STRUCT: Lazy = Lazy::new(|| { let hosts = [ "elasticsearch-node-0.mercury.dfinity.systems:443", "elasticsearch-node-1.mercury.dfinity.systems:443", @@ -246,7 +246,7 @@ mod test { "elasticsearch-node-3.mercury.dfinity.systems:443", ] .join(" "); - DeploymentJson { + DeploymentSettings { deployment: Deployment { name: "mainnet".to_string(), mgmt_mac: None, @@ -292,7 +292,7 @@ mod test { } }"#; - static MULTI_URL_STRUCT: Lazy = Lazy::new(|| { + static MULTI_URL_STRUCT: Lazy = Lazy::new(|| { let hosts = [ "elasticsearch-node-0.mercury.dfinity.systems:443", "elasticsearch-node-1.mercury.dfinity.systems:443", @@ -300,7 +300,7 @@ mod test { "elasticsearch-node-3.mercury.dfinity.systems:443", ] .join(" "); - DeploymentJson { + DeploymentSettings { deployment: Deployment { name: "mainnet".to_string(), mgmt_mac: None, @@ -322,27 +322,24 @@ mod test { #[test] fn deserialize_deployment() { - let parsed_deployment: DeploymentJson = { serde_json::from_str(DEPLOYMENT_STR).unwrap() }; + let parsed_deployment = { serde_json::from_str(DEPLOYMENT_STR).unwrap() }; assert_eq!(*DEPLOYMENT_STRUCT, parsed_deployment); - let parsed_deployment: DeploymentJson = - { serde_json::from_str(DEPLOYMENT_STR_NO_MGMT_MAC).unwrap() }; + let parsed_deployment = { serde_json::from_str(DEPLOYMENT_STR_NO_MGMT_MAC).unwrap() }; assert_eq!(*DEPLOYMENT_STRUCT_NO_MGMT_MAC, parsed_deployment); - let parsed_deployment: DeploymentJson = + let parsed_deployment = { serde_json::from_str(DEPLOYMENT_STR_NO_CPU_NO_MGMT_MAC).unwrap() }; assert_eq!(*DEPLOYMENT_STRUCT_NO_CPU_NO_MGMT_MAC, parsed_deployment); - let parsed_cpu_deployment: DeploymentJson = - { serde_json::from_str(QEMU_CPU_DEPLOYMENT_STR).unwrap() }; + let parsed_cpu_deployment = { serde_json::from_str(QEMU_CPU_DEPLOYMENT_STR).unwrap() }; assert_eq!(*QEMU_CPU_DEPLOYMENT_STRUCT, parsed_cpu_deployment); - let parsed_multi_url_deployment: DeploymentJson = - { serde_json::from_str(MULTI_URL_STR).unwrap() }; + let parsed_multi_url_deployment = { serde_json::from_str(MULTI_URL_STR).unwrap() }; assert_eq!(*MULTI_URL_STRUCT, parsed_multi_url_deployment); @@ -350,7 +347,7 @@ mod test { // slash, so the above case parses with a slash for the sake of the // writeback test below. In practice, we have used addresses without // this slash, so here we verify that this parses to the same value. 
- let parsed_multi_url_sans_slash_deployment: DeploymentJson = + let parsed_multi_url_sans_slash_deployment = { serde_json::from_str(MULTI_URL_SANS_SLASH_STR).unwrap() }; assert_eq!(*MULTI_URL_STRUCT, parsed_multi_url_sans_slash_deployment); @@ -358,44 +355,39 @@ mod test { // Exercise DeserializeOwned using serde_json::from_value. // DeserializeOwned is used by serde_json::from_reader, which is the // main entrypoint of this code, in practice. - let parsed_deployment: DeploymentJson = - { serde_json::from_value(DEPLOYMENT_VALUE.clone()).unwrap() }; + let parsed_deployment = { serde_json::from_value(DEPLOYMENT_VALUE.clone()).unwrap() }; assert_eq!(*DEPLOYMENT_STRUCT, parsed_deployment); } #[test] fn serialize_deployment() { - let serialized_deployment = - serde_json::to_string_pretty::(&DEPLOYMENT_STRUCT).unwrap(); + let serialized_deployment = serde_json::to_string_pretty(&*DEPLOYMENT_STRUCT).unwrap(); // DEPLOYMENT_STRUCT serializes to DEPLOYMENT_STR_NO_MGMT_MAC because mgmt_mac field is skipped in serialization assert_eq!(DEPLOYMENT_STR_NO_MGMT_MAC, serialized_deployment); let serialized_deployment = - serde_json::to_string_pretty::(&DEPLOYMENT_STRUCT_NO_CPU_NO_MGMT_MAC) - .unwrap(); + serde_json::to_string_pretty(&*DEPLOYMENT_STRUCT_NO_CPU_NO_MGMT_MAC).unwrap(); assert_eq!(DEPLOYMENT_STR_NO_CPU_NO_MGMT_MAC, serialized_deployment); let serialized_deployment = - serde_json::to_string_pretty::(&DEPLOYMENT_STRUCT_NO_MGMT_MAC).unwrap(); + serde_json::to_string_pretty(&*DEPLOYMENT_STRUCT_NO_MGMT_MAC).unwrap(); assert_eq!(DEPLOYMENT_STR_NO_MGMT_MAC, serialized_deployment); let serialized_deployment = - serde_json::to_string_pretty::(&DEPLOYMENT_STRUCT_NO_CPU_NO_MGMT_MAC) - .unwrap(); + serde_json::to_string_pretty(&*DEPLOYMENT_STRUCT_NO_CPU_NO_MGMT_MAC).unwrap(); assert_eq!(DEPLOYMENT_STR_NO_CPU_NO_MGMT_MAC, serialized_deployment); let serialized_deployment = - serde_json::to_string_pretty::(&QEMU_CPU_DEPLOYMENT_STRUCT).unwrap(); + serde_json::to_string_pretty(&*QEMU_CPU_DEPLOYMENT_STRUCT).unwrap(); assert_eq!(QEMU_CPU_DEPLOYMENT_STR, serialized_deployment); - let serialized_deployment = - serde_json::to_string_pretty::(&MULTI_URL_STRUCT).unwrap(); + let serialized_deployment = serde_json::to_string_pretty(&*MULTI_URL_STRUCT).unwrap(); assert_eq!(MULTI_URL_STR, serialized_deployment); } diff --git a/rs/ic_os/config/src/lib.rs b/rs/ic_os/config/src/lib.rs index ef505296d2c..c49f689cb53 100644 --- a/rs/ic_os/config/src/lib.rs +++ b/rs/ic_os/config/src/lib.rs @@ -1,38 +1,138 @@ -use std::collections::HashMap; -use std::fs::read_to_string; -use std::path::Path; +pub mod config_ini; +pub mod deployment_json; +pub mod types; use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; +use std::fs::{create_dir_all, File}; +use std::io::Write; +use std::path::Path; -pub type ConfigMap = HashMap; - -pub static DEFAULT_SETUPOS_CONFIG_FILE_PATH: &str = "/var/ic/config/config.ini"; +pub static DEFAULT_SETUPOS_CONFIG_OBJECT_PATH: &str = "/var/ic/config/config.json"; +pub static DEFAULT_SETUPOS_CONFIG_INI_FILE_PATH: &str = "/config/config.ini"; pub static DEFAULT_SETUPOS_DEPLOYMENT_JSON_PATH: &str = "/data/deployment.json"; +pub static DEFAULT_SETUPOS_NNS_PUBLIC_KEY_PATH: &str = "/data/nns_public_key.pem"; +pub static DEFAULT_SETUPOS_SSH_AUTHORIZED_KEYS_PATH: &str = "/config/ssh_authorized_keys"; +pub static DEFAULT_SETUPOS_NODE_OPERATOR_PRIVATE_KEY_PATH: &str = + "/config/node_operator_private_key.pem"; + +pub static DEFAULT_SETUPOS_HOSTOS_CONFIG_OBJECT_PATH: &str = 
"/var/ic/config/config-hostos.json"; -pub static DEFAULT_HOSTOS_CONFIG_FILE_PATH: &str = "/boot/config/config.ini"; +pub static DEFAULT_HOSTOS_CONFIG_INI_FILE_PATH: &str = "/boot/config/config.ini"; pub static DEFAULT_HOSTOS_DEPLOYMENT_JSON_PATH: &str = "/boot/config/deployment.json"; -fn parse_config_line(line: &str) -> Option<(String, String)> { - // Skip blank lines and comments - if line.is_empty() || line.trim().starts_with('#') { - return None; - } +pub fn serialize_and_write_config(path: &Path, config: &T) -> Result<()> { + let serialized_config = + serde_json::to_string_pretty(config).expect("Failed to serialize configuration"); - let parts: Vec<&str> = line.splitn(2, '=').collect(); - if parts.len() == 2 { - Some((parts[0].trim().into(), parts[1].trim().into())) - } else { - eprintln!("Warning: skipping config line due to unrecognized format: \"{line}\""); - eprintln!("Expected format: \"=\""); - None + if let Some(parent) = path.parent() { + create_dir_all(parent)?; } + + let mut file = File::create(path)?; + file.write_all(serialized_config.as_bytes())?; + Ok(()) +} + +pub fn deserialize_config Deserialize<'de>>(file_path: &str) -> Result { + let file = File::open(file_path).context(format!("Failed to open file: {}", file_path))?; + serde_json::from_reader(file).context(format!( + "Failed to deserialize JSON from file: {}", + file_path + )) } -pub fn config_map_from_path(config_file_path: &Path) -> Result { - let file_contents = read_to_string(config_file_path) - .with_context(|| format!("Error reading file: {}", config_file_path.display()))?; - Ok(file_contents - .lines() - .filter_map(parse_config_line) - .collect()) +#[cfg(test)] +mod tests { + use super::*; + use std::path::PathBuf; + use types::{ + GuestOSConfig, GuestOSSettings, GuestosDevConfig, HostOSConfig, HostOSSettings, + ICOSSettings, Logging, NetworkSettings, SetupOSConfig, SetupOSSettings, + }; + + #[test] + fn test_serialize_and_deserialize() { + let network_settings = NetworkSettings { + ipv6_prefix: None, + ipv6_address: None, + ipv6_prefix_length: 64_u8, + ipv6_gateway: "2001:db8::1".parse().unwrap(), + ipv4_address: None, + ipv4_gateway: None, + ipv4_prefix_length: None, + domain: None, + mgmt_mac: None, + }; + let logging = Logging { + elasticsearch_hosts: [ + "elasticsearch-node-0.mercury.dfinity.systems:443", + "elasticsearch-node-1.mercury.dfinity.systems:443", + "elasticsearch-node-2.mercury.dfinity.systems:443", + "elasticsearch-node-3.mercury.dfinity.systems:443", + ] + .join(" "), + elasticsearch_tags: None, + }; + let icos_settings = ICOSSettings { + logging, + nns_public_key_path: PathBuf::from("/path/to/key"), + nns_urls: vec!["http://localhost".parse().unwrap()], + hostname: "mainnet".to_string(), + node_operator_private_key_path: None, + ssh_authorized_keys_path: None, + }; + let setupos_settings = SetupOSSettings; + let hostos_settings = HostOSSettings { + vm_memory: 490, + vm_cpu: "kvm".to_string(), + verbose: false, + }; + let guestos_settings = GuestOSSettings { + ic_crypto_path: None, + ic_state_path: None, + ic_registry_local_store_path: None, + guestos_dev: GuestosDevConfig::default(), + }; + + let setupos_config_struct = SetupOSConfig { + network_settings: network_settings.clone(), + icos_settings: icos_settings.clone(), + setupos_settings: setupos_settings.clone(), + hostos_settings: hostos_settings.clone(), + guestos_settings: guestos_settings.clone(), + }; + let hostos_config_struct = HostOSConfig { + network_settings: network_settings.clone(), + icos_settings: icos_settings.clone(), + 
hostos_settings: hostos_settings.clone(), + guestos_settings: guestos_settings.clone(), + }; + let guestos_config_struct = GuestOSConfig { + network_settings: network_settings.clone(), + icos_settings: icos_settings.clone(), + guestos_settings: guestos_settings.clone(), + }; + + fn serialize_and_deserialize(config: &T) + where + T: serde::Serialize + + serde::de::DeserializeOwned + + std::cmp::PartialEq + + std::fmt::Debug, + { + // Test serialization + let buffer = serde_json::to_vec_pretty(config).expect("Failed to serialize config"); + assert!(!buffer.is_empty()); + + // Test deserialization + let deserialized_config: T = + serde_json::from_slice(&buffer).expect("Failed to deserialize config"); + assert_eq!(*config, deserialized_config); + } + + serialize_and_deserialize(&setupos_config_struct); + serialize_and_deserialize(&hostos_config_struct); + serialize_and_deserialize(&guestos_config_struct); + } } diff --git a/rs/ic_os/config/src/main.rs b/rs/ic_os/config/src/main.rs new file mode 100644 index 00000000000..65e825ee95c --- /dev/null +++ b/rs/ic_os/config/src/main.rs @@ -0,0 +1,171 @@ +use anyhow::Result; +use clap::{Parser, Subcommand}; +use config::config_ini::{get_config_ini_settings, ConfigIniSettings}; +use config::deployment_json::get_deployment_settings; +use config::serialize_and_write_config; +use std::fs::File; +use std::path::{Path, PathBuf}; + +use config::types::{ + GuestOSSettings, HostOSConfig, HostOSSettings, ICOSSettings, Logging, NetworkSettings, + SetupOSConfig, SetupOSSettings, +}; + +#[derive(Subcommand)] +pub enum Commands { + /// Creates SetupOSConfig object + CreateSetuposConfig { + #[arg(long, default_value = config::DEFAULT_SETUPOS_CONFIG_INI_FILE_PATH, value_name = "config.ini")] + config_ini_path: PathBuf, + + #[arg(long, default_value = config::DEFAULT_SETUPOS_DEPLOYMENT_JSON_PATH, value_name = "deployment.json")] + deployment_json_path: PathBuf, + + #[arg(long, default_value = config::DEFAULT_SETUPOS_NNS_PUBLIC_KEY_PATH, value_name = "nns_public_key.pem")] + nns_public_key_path: PathBuf, + + #[arg(long, default_value = config::DEFAULT_SETUPOS_SSH_AUTHORIZED_KEYS_PATH, value_name = "ssh_authorized_keys")] + ssh_authorized_keys_path: PathBuf, + + #[arg(long, default_value = config::DEFAULT_SETUPOS_NODE_OPERATOR_PRIVATE_KEY_PATH, value_name = "node_operator_private_key.pem")] + node_operator_private_key_path: PathBuf, + + #[arg(long, default_value = config::DEFAULT_SETUPOS_CONFIG_OBJECT_PATH, value_name = "config.json")] + setupos_config_json_path: PathBuf, + }, + /// Creates HostOSConfig object from existing SetupOS config.json file + GenerateHostosConfig { + #[arg(long, default_value = config::DEFAULT_SETUPOS_CONFIG_OBJECT_PATH, value_name = "config.json")] + setupos_config_json_path: PathBuf, + #[arg(long, default_value = config::DEFAULT_SETUPOS_HOSTOS_CONFIG_OBJECT_PATH, value_name = "config-hostos.json")] + hostos_config_json_path: PathBuf, + }, +} + +#[derive(Parser)] +#[command()] +struct ConfigArgs { + #[command(subcommand)] + command: Option, +} + +pub fn main() -> Result<()> { + let opts = ConfigArgs::parse(); + + match opts.command { + Some(Commands::CreateSetuposConfig { + config_ini_path, + deployment_json_path, + nns_public_key_path, + ssh_authorized_keys_path, + node_operator_private_key_path, + setupos_config_json_path, + }) => { + // get config.ini settings + let config_ini_settings = get_config_ini_settings(&config_ini_path)?; + let ConfigIniSettings { + ipv6_prefix, + ipv6_address, + ipv6_prefix_length, + ipv6_gateway, + ipv4_address, + 
ipv4_gateway, + ipv4_prefix_length, + domain, + verbose, + } = config_ini_settings; + + // get deployment.json variables + let deployment_json_settings = get_deployment_settings(&deployment_json_path)?; + + let network_settings = NetworkSettings { + ipv6_prefix, + ipv6_address, + ipv6_prefix_length, + ipv6_gateway, + ipv4_address, + ipv4_gateway, + ipv4_prefix_length, + domain, + mgmt_mac: deployment_json_settings.deployment.mgmt_mac, + }; + + let logging = Logging { + elasticsearch_hosts: deployment_json_settings.logging.hosts.to_string(), + elasticsearch_tags: None, + }; + + let icos_settings = ICOSSettings { + logging, + nns_public_key_path: nns_public_key_path.to_path_buf(), + nns_urls: deployment_json_settings.nns.url.clone(), + hostname: deployment_json_settings.deployment.name.to_string(), + node_operator_private_key_path: node_operator_private_key_path + .exists() + .then_some(node_operator_private_key_path), + ssh_authorized_keys_path: ssh_authorized_keys_path + .exists() + .then_some(ssh_authorized_keys_path), + }; + + let setupos_settings = SetupOSSettings; + + let hostos_settings = HostOSSettings { + vm_memory: deployment_json_settings.resources.memory, + vm_cpu: deployment_json_settings + .resources + .cpu + .clone() + .unwrap_or("kvm".to_string()), + verbose, + }; + + let guestos_settings = GuestOSSettings::default(); + + let setupos_config = SetupOSConfig { + network_settings, + icos_settings, + setupos_settings, + hostos_settings, + guestos_settings, + }; + + let setupos_config_json_path = Path::new(&setupos_config_json_path); + serialize_and_write_config(setupos_config_json_path, &setupos_config)?; + + println!( + "SetupOSConfig has been written to {}", + setupos_config_json_path.display() + ); + + Ok(()) + } + Some(Commands::GenerateHostosConfig { + setupos_config_json_path, + hostos_config_json_path, + }) => { + let setupos_config_json_path = Path::new(&setupos_config_json_path); + + let setupos_config: SetupOSConfig = + serde_json::from_reader(File::open(setupos_config_json_path)?)?; + + let hostos_config = HostOSConfig { + network_settings: setupos_config.network_settings, + icos_settings: setupos_config.icos_settings, + hostos_settings: setupos_config.hostos_settings, + guestos_settings: setupos_config.guestos_settings, + }; + + let hostos_config_json_path = Path::new(&hostos_config_json_path); + serialize_and_write_config(hostos_config_json_path, &hostos_config)?; + + println!( + "HostOSConfig has been written to {}", + hostos_config_json_path.display() + ); + + Ok(()) + } + None => Ok(()), + } +} diff --git a/rs/ic_os/config/src/types.rs b/rs/ic_os/config/src/types.rs new file mode 100644 index 00000000000..7562b55e12e --- /dev/null +++ b/rs/ic_os/config/src/types.rs @@ -0,0 +1,125 @@ +use ic_types::malicious_behaviour::MaliciousBehaviour; +use serde::{Deserialize, Serialize}; +use std::net::{Ipv4Addr, Ipv6Addr}; +use std::path::PathBuf; +use url::Url; + +/// SetupOS configuration. User-facing configuration files +/// (e.g., `config.ini`, `deployment.json`) are transformed into `SetupOSConfig`. +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +pub struct SetupOSConfig { + pub network_settings: NetworkSettings, + pub icos_settings: ICOSSettings, + pub setupos_settings: SetupOSSettings, + pub hostos_settings: HostOSSettings, + pub guestos_settings: GuestOSSettings, +} + +/// HostOS configuration. In production, this struct inherits settings from `SetupOSConfig`. 
+#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +pub struct HostOSConfig { + pub network_settings: NetworkSettings, + pub icos_settings: ICOSSettings, + pub hostos_settings: HostOSSettings, + pub guestos_settings: GuestOSSettings, +} + +/// GuestOS configuration. In production, this struct inherits settings from `HostOSConfig`. +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +pub struct GuestOSConfig { + pub network_settings: NetworkSettings, + pub icos_settings: ICOSSettings, + pub guestos_settings: GuestOSSettings, +} + +/// Placeholder for SetupOS-specific settings. +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +pub struct SetupOSSettings; + +/// HostOS-specific settings. +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +pub struct HostOSSettings { + pub vm_memory: u32, + pub vm_cpu: String, + pub verbose: bool, +} + +/// GuestOS-specific settings. +#[derive(Serialize, Deserialize, Debug, PartialEq, Default, Clone)] +pub struct GuestOSSettings { + /// Externally generated cryptographic keys. + /// Must be a directory with contents matching the internal representation of the ic_crypto directory. + /// When given, this provides the private keys of the node. + /// If not given, the node will generate its own private/public key pair. + pub ic_crypto_path: Option, + pub ic_state_path: Option, + /// Initial registry state. + /// Must be a directory with contents matching the internal representation of the ic_registry_local_store. + /// When given, this provides the initial state of the registry. + /// If not given, the node will fetch (initial) registry state from the NNS. + pub ic_registry_local_store_path: Option, + pub guestos_dev: GuestosDevConfig, +} + +/// GuestOS development configuration. These settings are strictly used for development images. +#[derive(Serialize, Deserialize, Debug, PartialEq, Default, Clone)] +pub struct GuestosDevConfig { + pub backup_spool: Option, + pub malicious_behavior: Option, + pub query_stats_epoch_length: Option, + pub bitcoind_addr: Option, + pub jaeger_addr: Option, + pub socks_proxy: Option, +} + +/// Configures the usage of the backup spool directory. +#[derive(Serialize, Deserialize, Debug, PartialEq, Default, Clone)] +pub struct BackupSpoolSettings { + /// The maximum age of any file or directory kept in the backup spool. + pub backup_retention_time_seconds: Option, + /// The interval at which the backup spool directory will be scanned for files to delete. + pub backup_purging_interval_seconds: Option, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +pub struct NetworkSettings { + // Config.ini can specify ipv6_prefix and ipv6_gateway, or just an ipv6_address. + // ipv6_address takes precedence. Some tests provide only ipv6_address. + pub ipv6_prefix: Option, + pub ipv6_address: Option, + pub ipv6_prefix_length: u8, + pub ipv6_gateway: Ipv6Addr, + pub ipv4_address: Option, + pub ipv4_gateway: Option, + pub ipv4_prefix_length: Option, + pub domain: Option, + pub mgmt_mac: Option, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +pub struct ICOSSettings { + pub logging: Logging, + /// This file must be a text file containing the public key of the NNS to be used. + pub nns_public_key_path: PathBuf, + /// The URL (HTTP) of the NNS node(s). + pub nns_urls: Vec, + pub hostname: String, + /// This file contains the Node Operator private key, + /// which is registered with the NNS and used to sign the IC join request. 
+ pub node_operator_private_key_path: Option, + /// This directory contains individual files named `admin`, `backup`, `readonly`. + /// The contents of these files serve as `authorized_keys` for their respective role account. + /// This means that, for example, `accounts_ssh_authorized_keys/admin` + /// is transferred to `~admin/.ssh/authorized_keys` on the target system. + /// backup and readonly can only be modified via an NNS proposal + /// and are in place for subnet recovery or issue debugging purposes. + pub ssh_authorized_keys_path: Option, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +pub struct Logging { + /// Space-separated lists of hosts to ship logs to. + pub elasticsearch_hosts: String, + /// Space-separated list of tags to apply to exported log records. + pub elasticsearch_tags: Option, +} diff --git a/rs/ic_os/dev_test_tools/setupos-inject-configuration/BUILD.bazel b/rs/ic_os/dev_test_tools/setupos-inject-configuration/BUILD.bazel index 99976177f26..3aa287b6b04 100644 --- a/rs/ic_os/dev_test_tools/setupos-inject-configuration/BUILD.bazel +++ b/rs/ic_os/dev_test_tools/setupos-inject-configuration/BUILD.bazel @@ -5,7 +5,7 @@ package(default_visibility = ["//rs:ic-os-pkg"]) DEPENDENCIES = [ # Keep sorted. "//rs/ic_os/build_tools/partition_tools", - "//rs/ic_os/utils", + "//rs/ic_os/config:config_lib", "@crate_index//:anyhow", "@crate_index//:clap", "@crate_index//:serde", diff --git a/rs/ic_os/dev_test_tools/setupos-inject-configuration/Cargo.toml b/rs/ic_os/dev_test_tools/setupos-inject-configuration/Cargo.toml index 6662c930263..16bb8b431f4 100644 --- a/rs/ic_os/dev_test_tools/setupos-inject-configuration/Cargo.toml +++ b/rs/ic_os/dev_test_tools/setupos-inject-configuration/Cargo.toml @@ -13,4 +13,4 @@ serde_with = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } url = { workspace = true } -utils = { path = "../../utils"} +config = { path = "../../config"} diff --git a/rs/ic_os/dev_test_tools/setupos-inject-configuration/src/main.rs b/rs/ic_os/dev_test_tools/setupos-inject-configuration/src/main.rs index e85eaeaa7e8..206cfdb0bd6 100644 --- a/rs/ic_os/dev_test_tools/setupos-inject-configuration/src/main.rs +++ b/rs/ic_os/dev_test_tools/setupos-inject-configuration/src/main.rs @@ -12,8 +12,8 @@ use clap::{Args, Parser}; use tempfile::NamedTempFile; use url::Url; +use config::deployment_json::DeploymentSettings; use partition_tools::{ext::ExtPartition, fat::FatPartition, Partition}; -use utils::deployment::DeploymentJson; const SERVICE_NAME: &str = "setupos-inject-configuration"; @@ -227,7 +227,7 @@ async fn write_public_keys(path: &Path, ks: Vec) -> Result<(), Error> { async fn update_deployment(path: &Path, cfg: &DeploymentConfig) -> Result<(), Error> { let mut deployment_json = { let f = File::open(path).context("failed to open deployment config file")?; - let deployment_json: DeploymentJson = serde_json::from_reader(f)?; + let deployment_json: DeploymentSettings = serde_json::from_reader(f)?; deployment_json }; diff --git a/rs/ic_os/network/BUILD.bazel b/rs/ic_os/network/BUILD.bazel index 5929beb3512..64f1f412458 100644 --- a/rs/ic_os/network/BUILD.bazel +++ b/rs/ic_os/network/BUILD.bazel @@ -4,7 +4,7 @@ package(default_visibility = ["//rs:ic-os-pkg"]) DEPENDENCIES = [ # Keep sorted. 
- "//rs/ic_os/config", + "//rs/ic_os/config:config_lib", "//rs/ic_os/utils", "@crate_index//:anyhow", "@crate_index//:hex", diff --git a/rs/ic_os/network/src/info.rs b/rs/ic_os/network/src/info.rs index b69d2eb8b48..88109c8e71a 100644 --- a/rs/ic_os/network/src/info.rs +++ b/rs/ic_os/network/src/info.rs @@ -2,7 +2,7 @@ use std::net::Ipv6Addr; use anyhow::{bail, Context, Result}; -use config::ConfigMap; +use config::config_ini::ConfigMap; #[derive(Debug)] pub struct NetworkInfo { diff --git a/rs/ic_os/os_tools/guestos_tool/BUILD.bazel b/rs/ic_os/os_tools/guestos_tool/BUILD.bazel index 08f6b8b58fe..6ce380e64ff 100644 --- a/rs/ic_os/os_tools/guestos_tool/BUILD.bazel +++ b/rs/ic_os/os_tools/guestos_tool/BUILD.bazel @@ -4,7 +4,7 @@ package(default_visibility = ["//rs:ic-os-pkg"]) DEPENDENCIES = [ # Keep sorted. - "//rs/ic_os/config", + "//rs/ic_os/config:config_lib", "//rs/ic_os/network", "//rs/ic_os/utils", "@crate_index//:anyhow", diff --git a/rs/ic_os/os_tools/guestos_tool/src/generate_network_config.rs b/rs/ic_os/os_tools/guestos_tool/src/generate_network_config.rs index ad1da579da9..0f4eb2b2cb3 100644 --- a/rs/ic_os/os_tools/guestos_tool/src/generate_network_config.rs +++ b/rs/ic_os/os_tools/guestos_tool/src/generate_network_config.rs @@ -6,7 +6,7 @@ use std::str::FromStr; use anyhow::{bail, Context, Result}; -use config::config_map_from_path; +use config::config_ini::config_map_from_path; use network::interfaces::{get_interface_name as get_valid_interface_name, get_interface_paths}; use utils::get_command_stdout; diff --git a/rs/ic_os/os_tools/hostos_tool/BUILD.bazel b/rs/ic_os/os_tools/hostos_tool/BUILD.bazel index 5d7a691c68d..ea5395c09e4 100644 --- a/rs/ic_os/os_tools/hostos_tool/BUILD.bazel +++ b/rs/ic_os/os_tools/hostos_tool/BUILD.bazel @@ -4,7 +4,7 @@ package(default_visibility = ["//rs:ic-os-pkg"]) DEPENDENCIES = [ # Keep sorted. 
- "//rs/ic_os/config", + "//rs/ic_os/config:config_lib", "//rs/ic_os/network", "//rs/ic_os/utils", "@crate_index//:anyhow", diff --git a/rs/ic_os/os_tools/hostos_tool/src/main.rs b/rs/ic_os/os_tools/hostos_tool/src/main.rs index 4e3f442ee74..c05a053ca42 100644 --- a/rs/ic_os/os_tools/hostos_tool/src/main.rs +++ b/rs/ic_os/os_tools/hostos_tool/src/main.rs @@ -3,16 +3,15 @@ use std::path::Path; use anyhow::{anyhow, Context, Result}; use clap::{Parser, Subcommand}; -use config::{ - config_map_from_path, DEFAULT_HOSTOS_CONFIG_FILE_PATH, DEFAULT_HOSTOS_DEPLOYMENT_JSON_PATH, -}; +use config::config_ini::config_map_from_path; +use config::deployment_json::get_deployment_settings; +use config::{DEFAULT_HOSTOS_CONFIG_INI_FILE_PATH, DEFAULT_HOSTOS_DEPLOYMENT_JSON_PATH}; use network::generate_network_config; use network::info::NetworkInfo; use network::ipv6::generate_ipv6_address; use network::mac_address::{generate_mac_address, FormattedMacAddress}; use network::node_type::NodeType; use network::systemd::DEFAULT_SYSTEMD_NETWORK_DIR; -use utils::deployment::read_deployment_file; use utils::to_cidr; #[derive(Subcommand)] @@ -35,7 +34,7 @@ pub enum Commands { #[derive(Parser)] struct HostOSArgs { - #[arg(short, long, default_value_t = DEFAULT_HOSTOS_CONFIG_FILE_PATH.to_string(), value_name = "FILE")] + #[arg(short, long, default_value_t = DEFAULT_HOSTOS_CONFIG_INI_FILE_PATH.to_string(), value_name = "FILE")] config: String, #[arg(short, long, default_value_t = DEFAULT_HOSTOS_DEPLOYMENT_JSON_PATH.to_string(), value_name = "FILE")] @@ -64,9 +63,9 @@ pub fn main() -> Result<()> { let network_info = NetworkInfo::from_config_map(&config_map)?; eprintln!("Network info config: {:?}", &network_info); - let deployment = read_deployment_file(Path::new(&opts.deployment_file)); + let deployment_settings = get_deployment_settings(Path::new(&opts.deployment_file)); - let deployment_name: Option<&str> = match &deployment { + let deployment_name: Option<&str> = match &deployment_settings { Ok(deployment) => Some(deployment.deployment.name.as_str()), Err(e) => { eprintln!("Error retrieving deployment file: {e}. 
Continuing without it"); @@ -74,7 +73,7 @@ pub fn main() -> Result<()> { } }; - let mgmt_mac: Option<&str> = match &deployment { + let mgmt_mac: Option<&str> = match &deployment_settings { Ok(deployment) => deployment.deployment.mgmt_mac.as_deref(), Err(_) => None, }; @@ -88,9 +87,9 @@ pub fn main() -> Result<()> { ) } Some(Commands::GenerateIpv6Address { node_type }) => { - let deployment = read_deployment_file(Path::new(&opts.deployment_file)) + let deployment_settings = get_deployment_settings(Path::new(&opts.deployment_file)) .context("Please specify a valid deployment file with '--deployment-file'")?; - eprintln!("Deployment config: {:?}", deployment); + eprintln!("Deployment config: {:?}", deployment_settings); let config_map = config_map_from_path(Path::new(&opts.config)) .context("Please specify a valid config file with '--config'")?; @@ -101,9 +100,9 @@ pub fn main() -> Result<()> { let node_type = node_type.parse::()?; let mac = generate_mac_address( - &deployment.deployment.name, + &deployment_settings.deployment.name, &node_type, - deployment.deployment.mgmt_mac.as_deref(), + deployment_settings.deployment.mgmt_mac.as_deref(), )?; let ipv6_prefix = network_info .ipv6_prefix @@ -120,15 +119,15 @@ pub fn main() -> Result<()> { let network_info = NetworkInfo::from_config_map(&config_map)?; eprintln!("Network info config: {:?}", &network_info); - let deployment = read_deployment_file(Path::new(&opts.deployment_file)) + let deployment_settings = get_deployment_settings(Path::new(&opts.deployment_file)) .context("Please specify a valid deployment file with '--deployment-file'")?; - eprintln!("Deployment config: {:?}", deployment); + eprintln!("Deployment config: {:?}", deployment_settings); let node_type = node_type.parse::()?; let mac = generate_mac_address( - &deployment.deployment.name, + &deployment_settings.deployment.name, &node_type, - deployment.deployment.mgmt_mac.as_deref(), + deployment_settings.deployment.mgmt_mac.as_deref(), )?; let mac = FormattedMacAddress::from(&mac); println!("{}", mac.get()); diff --git a/rs/ic_os/os_tools/setupos_tool/BUILD.bazel b/rs/ic_os/os_tools/setupos_tool/BUILD.bazel index e393c8a31aa..504a29a3f68 100644 --- a/rs/ic_os/os_tools/setupos_tool/BUILD.bazel +++ b/rs/ic_os/os_tools/setupos_tool/BUILD.bazel @@ -4,7 +4,7 @@ package(default_visibility = ["//rs:ic-os-pkg"]) DEPENDENCIES = [ # Keep sorted. 
- "//rs/ic_os/config", + "//rs/ic_os/config:config_lib", "//rs/ic_os/network", "//rs/ic_os/utils", "@crate_index//:anyhow", diff --git a/rs/ic_os/os_tools/setupos_tool/src/main.rs b/rs/ic_os/os_tools/setupos_tool/src/main.rs index 22ea56e50d6..0bcda31d0ba 100644 --- a/rs/ic_os/os_tools/setupos_tool/src/main.rs +++ b/rs/ic_os/os_tools/setupos_tool/src/main.rs @@ -3,16 +3,15 @@ use std::path::Path; use anyhow::{anyhow, Context, Result}; use clap::{Parser, Subcommand}; -use config::{ - config_map_from_path, DEFAULT_SETUPOS_CONFIG_FILE_PATH, DEFAULT_SETUPOS_DEPLOYMENT_JSON_PATH, -}; +use config::config_ini::config_map_from_path; +use config::deployment_json::get_deployment_settings; +use config::{DEFAULT_SETUPOS_CONFIG_INI_FILE_PATH, DEFAULT_SETUPOS_DEPLOYMENT_JSON_PATH}; use network::generate_network_config; use network::info::NetworkInfo; use network::ipv6::generate_ipv6_address; use network::mac_address::{generate_mac_address, FormattedMacAddress}; use network::node_type::NodeType; use network::systemd::DEFAULT_SYSTEMD_NETWORK_DIR; -use utils::deployment::read_deployment_file; use utils::to_cidr; #[derive(Subcommand)] @@ -35,7 +34,7 @@ pub enum Commands { #[derive(Parser)] struct SetupOSArgs { - #[arg(short, long, default_value_t = DEFAULT_SETUPOS_CONFIG_FILE_PATH.to_string(), value_name = "FILE")] + #[arg(short, long, default_value_t = DEFAULT_SETUPOS_CONFIG_INI_FILE_PATH.to_string(), value_name = "FILE")] config: String, #[arg(short, long, default_value_t = DEFAULT_SETUPOS_DEPLOYMENT_JSON_PATH.to_string(), value_name = "FILE")] @@ -63,8 +62,8 @@ pub fn main() -> Result<()> { let network_info = NetworkInfo::from_config_map(&config_map)?; eprintln!("Network info config: {:?}", &network_info); - let deployment = read_deployment_file(Path::new(&opts.deployment_file)); - let deployment_name: Option<&str> = match &deployment { + let deployment_settings = get_deployment_settings(Path::new(&opts.deployment_file)); + let deployment_name: Option<&str> = match &deployment_settings { Ok(deployment) => Some(deployment.deployment.name.as_str()), Err(e) => { eprintln!("Error retrieving deployment file: {e}. 
Continuing without it"); @@ -72,7 +71,7 @@ pub fn main() -> Result<()> { } }; - let mgmt_mac: Option<&str> = match &deployment { + let mgmt_mac: Option<&str> = match &deployment_settings { Ok(deployment) => deployment.deployment.mgmt_mac.as_deref(), Err(_) => None, }; @@ -86,9 +85,9 @@ pub fn main() -> Result<()> { ) } Some(Commands::GenerateIpv6Address { node_type }) => { - let deployment = read_deployment_file(Path::new(&opts.deployment_file)) + let deployment_settings = get_deployment_settings(Path::new(&opts.deployment_file)) .context("Please specify a valid deployment file with '--deployment-file'")?; - eprintln!("Deployment config: {:?}", deployment); + eprintln!("Deployment config: {:?}", deployment_settings); let config_map = config_map_from_path(Path::new(&opts.config)) .context("Please specify a valid config file with '--config'")?; @@ -100,9 +99,9 @@ pub fn main() -> Result<()> { let node_type = node_type.parse::()?; let mac = generate_mac_address( - &deployment.deployment.name, + &deployment_settings.deployment.name, &node_type, - deployment.deployment.mgmt_mac.as_deref(), + deployment_settings.deployment.mgmt_mac.as_deref(), )?; let ipv6_prefix = network_info .ipv6_prefix @@ -119,15 +118,15 @@ pub fn main() -> Result<()> { let network_info = NetworkInfo::from_config_map(&config_map)?; eprintln!("Network info config: {:?}", &network_info); - let deployment = read_deployment_file(Path::new(&opts.deployment_file)) + let deployment_settings = get_deployment_settings(Path::new(&opts.deployment_file)) .context("Please specify a valid deployment file with '--deployment-file'")?; - eprintln!("Deployment config: {:?}", deployment); + eprintln!("Deployment config: {:?}", deployment_settings); let node_type = node_type.parse::()?; let mac = generate_mac_address( - &deployment.deployment.name, + &deployment_settings.deployment.name, &node_type, - deployment.deployment.mgmt_mac.as_deref(), + deployment_settings.deployment.mgmt_mac.as_deref(), )?; let mac = FormattedMacAddress::from(&mac); println!("{}", mac.get()); diff --git a/rs/ic_os/release/BUILD.bazel b/rs/ic_os/release/BUILD.bazel index 7e3a9740a2c..d5a5eb40598 100644 --- a/rs/ic_os/release/BUILD.bazel +++ b/rs/ic_os/release/BUILD.bazel @@ -8,6 +8,7 @@ OBJECTS = { "hostos_tool": "//rs/ic_os/os_tools/hostos_tool:hostos_tool", "nft-exporter": "//rs/ic_os/nft_exporter:nft-exporter", "setupos_tool": "//rs/ic_os/os_tools/setupos_tool:setupos_tool", + "config": "//rs/ic_os/config:config", "vsock_guest": "//rs/ic_os/vsock/guest:vsock_guest", "vsock_host": "//rs/ic_os/vsock/host:vsock_host", "metrics-proxy": "@crate_index//:metrics-proxy__metrics-proxy", diff --git a/rs/ic_os/utils/BUILD.bazel b/rs/ic_os/utils/BUILD.bazel index 1fe7e1eb715..53b0a22cfb8 100644 --- a/rs/ic_os/utils/BUILD.bazel +++ b/rs/ic_os/utils/BUILD.bazel @@ -5,15 +5,6 @@ package(default_visibility = ["//rs:ic-os-pkg"]) DEPENDENCIES = [ # Keep sorted. "@crate_index//:anyhow", - "@crate_index//:serde", - "@crate_index//:serde_json", - "@crate_index//:serde_with", - "@crate_index//:url", -] - -DEV_DEPENDENCIES = [ - # Keep sorted. 
- "@crate_index//:once_cell", ] rust_library( @@ -32,5 +23,5 @@ rust_test( name = "test", size = "small", crate = ":utils", - deps = DEPENDENCIES + DEV_DEPENDENCIES, + deps = DEPENDENCIES, ) diff --git a/rs/ic_os/utils/Cargo.toml b/rs/ic_os/utils/Cargo.toml index f6032fcf829..a2b9ac44755 100644 --- a/rs/ic_os/utils/Cargo.toml +++ b/rs/ic_os/utils/Cargo.toml @@ -5,11 +5,3 @@ edition = "2021" [dependencies] anyhow = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -serde_with = "1.6.2" -url = { workspace = true } - -[dev-dependencies] -once_cell = "1.8" - diff --git a/rs/ic_os/utils/src/lib.rs b/rs/ic_os/utils/src/lib.rs index 5e403788d84..f491bb260dc 100644 --- a/rs/ic_os/utils/src/lib.rs +++ b/rs/ic_os/utils/src/lib.rs @@ -4,8 +4,6 @@ use std::process::Command; use anyhow::{bail, Result}; -pub mod deployment; - /// Systemd requires ip addresses to be specified with the prefix length pub fn to_cidr(ipv6_address: Ipv6Addr, prefix_length: u8) -> String { format!("{}/{}", ipv6_address, prefix_length) diff --git a/rs/nervous_system/common/src/lib.rs b/rs/nervous_system/common/src/lib.rs index 548164e2b6f..21a6439e0d3 100644 --- a/rs/nervous_system/common/src/lib.rs +++ b/rs/nervous_system/common/src/lib.rs @@ -8,8 +8,10 @@ use dfn_core::api::time_nanos; use ic_base_types::CanisterId; use ic_canister_log::{export, GlobalBuffer, LogBuffer, LogEntry}; use ic_canisters_http_types::{HttpRequest, HttpResponse, HttpResponseBuilder}; -use ic_ledger_core::tokens::{CheckedAdd, CheckedSub}; -use ic_ledger_core::Tokens; +use ic_ledger_core::{ + tokens::{CheckedAdd, CheckedSub}, + Tokens, +}; use lazy_static::lazy_static; use maplit::hashmap; use num_traits::ops::inv::Inv; @@ -720,75 +722,6 @@ pub fn serve_metrics( } } -/// Verifies that the url is within the allowed length, and begins with -/// `http://` or `https://`. In addition, it will return an error in case of a -/// possibly "dangerous" condition, such as the url containing a username or -/// password, or having a port, or not having a domain name. -pub fn validate_proposal_url( - url: &str, - min_length: usize, - max_length: usize, - field_name: &str, - allowed_domains: Option>, -) -> Result<(), String> { - // // Check that the URL is a sensible length - if url.len() > max_length { - return Err(format!( - "{field_name} must be less than {max_length} characters long, but it is {} characters long. (Field was set to `{url}`.)", - url.len(), - )); - } - if url.len() < min_length { - return Err(format!( - "{field_name} must be greater or equal to than {min_length} characters long, but it is {} characters long. (Field was set to `{url}`.)", - url.len(), - )); - } - - // - - if !url.starts_with("https://") { - return Err(format!( - "{field_name} must begin with https://. 
(Field was set to `{url}`.)", - )); - } - - let parts_url: Vec<&str> = url.split("://").collect(); - if parts_url.len() > 2 { - return Err(format!( - "{field_name} contains an invalid sequence of characters" - )); - } - - if parts_url.len() < 2 { - return Err(format!("{field_name} is missing content after protocol.")); - } - - if url.contains('@') { - return Err(format!( - "{field_name} cannot contain authentication information" - )); - } - - let parts_past_protocol = parts_url[1].split_once('/'); - - let (domain, _path) = match parts_past_protocol { - Some((domain, path)) => (domain, Some(path)), - None => (parts_url[1], None), - }; - - match allowed_domains { - Some(allowed) => match allowed.iter().any(|allowed| domain == *allowed) { - true => Ok(()), - false => Err(format!( - "{field_name} was not in the list of allowed domains: {:?}", - allowed - )), - }, - None => Ok(()), - } -} - /// Returns the total amount of memory (heap, stable memory, etc) that the calling canister has allocated. #[cfg(target_arch = "wasm32")] pub fn total_memory_size_bytes() -> usize { diff --git a/rs/nervous_system/common/validation/BUILD.bazel b/rs/nervous_system/common/validation/BUILD.bazel new file mode 100644 index 00000000000..bf7a1fef1fd --- /dev/null +++ b/rs/nervous_system/common/validation/BUILD.bazel @@ -0,0 +1,13 @@ +load("@rules_rust//rust:defs.bzl", "rust_library") + +package(default_visibility = ["//visibility:public"]) + +rust_library( + name = "validation", + srcs = glob(["src/**"]), + crate_name = "ic_nervous_system_common_validation", + version = "0.9.0", + deps = [ + # Keep sorted. + ], +) diff --git a/rs/nervous_system/common/validation/Cargo.toml b/rs/nervous_system/common/validation/Cargo.toml new file mode 100644 index 00000000000..51c1513c699 --- /dev/null +++ b/rs/nervous_system/common/validation/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "ic-nervous-system-common-validation" +version.workspace = true +authors.workspace = true +edition.workspace = true +description.workspace = true +documentation.workspace = true + +[lib] +path = "src/lib.rs" + +[dependencies] diff --git a/rs/nervous_system/common/validation/src/lib.rs b/rs/nervous_system/common/validation/src/lib.rs new file mode 100644 index 00000000000..789868393e6 --- /dev/null +++ b/rs/nervous_system/common/validation/src/lib.rs @@ -0,0 +1,68 @@ +/// Verifies that the url is within the allowed length, and begins with +/// `http://` or `https://`. In addition, it will return an error in case of a +/// possibly "dangerous" condition, such as the url containing a username or +/// password, or having a port, or not having a domain name. +pub fn validate_proposal_url( + url: &str, + min_length: usize, + max_length: usize, + field_name: &str, + allowed_domains: Option>, +) -> Result<(), String> { + // // Check that the URL is a sensible length + if url.len() > max_length { + return Err(format!( + "{field_name} must be less than {max_length} characters long, but it is {} characters long. (Field was set to `{url}`.)", + url.len(), + )); + } + if url.len() < min_length { + return Err(format!( + "{field_name} must be greater or equal to than {min_length} characters long, but it is {} characters long. (Field was set to `{url}`.)", + url.len(), + )); + } + + // + + if !url.starts_with("https://") { + return Err(format!( + "{field_name} must begin with https://. 
(Field was set to `{url}`.)", + )); + } + + let parts_url: Vec<&str> = url.split("://").collect(); + if parts_url.len() > 2 { + return Err(format!( + "{field_name} contains an invalid sequence of characters" + )); + } + + if parts_url.len() < 2 { + return Err(format!("{field_name} is missing content after protocol.")); + } + + if url.contains('@') { + return Err(format!( + "{field_name} cannot contain authentication information" + )); + } + + let parts_past_protocol = parts_url[1].split_once('/'); + + let (domain, _path) = match parts_past_protocol { + Some((domain, path)) => (domain, Some(path)), + None => (parts_url[1], None), + }; + + match allowed_domains { + Some(allowed) => match allowed.iter().any(|allowed| domain == *allowed) { + true => Ok(()), + false => Err(format!( + "{field_name} was not in the list of allowed domains: {:?}", + allowed + )), + }, + None => Ok(()), + } +} diff --git a/rs/nervous_system/integration_tests/BUILD.bazel b/rs/nervous_system/integration_tests/BUILD.bazel index 0063159a0cf..16b6f5174a0 100644 --- a/rs/nervous_system/integration_tests/BUILD.bazel +++ b/rs/nervous_system/integration_tests/BUILD.bazel @@ -53,7 +53,6 @@ BASE_DEPENDENCIES = [ # Currently unused. # DEPENDENCIES = BASE_DEPENDENCIES + [ # "//rs/sns/init", -# "//rs/nns/governance", # "//rs/nns/sns-wasm", # "//rs/nns/handlers/root/impl:root", # "//rs/sns/swap", diff --git a/rs/nervous_system/string/src/lib.rs b/rs/nervous_system/string/src/lib.rs index b38de0da8c2..1c005288891 100644 --- a/rs/nervous_system/string/src/lib.rs +++ b/rs/nervous_system/string/src/lib.rs @@ -22,7 +22,7 @@ pub fn clamp_string_len(s: &str, max_len: usize) -> String { format!("{}...{}", &s[0..head_len], &s[tail_begin..s.len()]) } -pub fn clamp_debug_len(object: impl Debug, max_len: usize) -> String { +pub fn clamp_debug_len(object: &impl Debug, max_len: usize) -> String { clamp_string_len(&format!("{:#?}", object), max_len) } diff --git a/rs/nervous_system/string/src/tests.rs b/rs/nervous_system/string/src/tests.rs index b49363b0c57..dc82f51c436 100644 --- a/rs/nervous_system/string/src/tests.rs +++ b/rs/nervous_system/string/src/tests.rs @@ -25,5 +25,5 @@ fn test_clamp_debug_len() { i: i32, } - assert_eq!(&clamp_debug_len(S { i: 42 }, 100), "S {\n i: 42,\n}"); + assert_eq!(&clamp_debug_len(&S { i: 42 }, 100), "S {\n i: 42,\n}"); } diff --git a/rs/nns/governance/api/BUILD.bazel b/rs/nns/governance/api/BUILD.bazel index f81ddb1fbc4..9b2d06b0768 100644 --- a/rs/nns/governance/api/BUILD.bazel +++ b/rs/nns/governance/api/BUILD.bazel @@ -7,6 +7,7 @@ BASE_DEPENDENCIES = [ # Keep sorted. 
"//rs/crypto/sha2", "//rs/nervous_system/clients", + "//rs/nervous_system/common/validation", "//rs/nervous_system/proto", "//rs/nns/common", "//rs/protobuf", diff --git a/rs/nns/governance/api/Cargo.toml b/rs/nns/governance/api/Cargo.toml index 266b171603d..44d912c3e12 100644 --- a/rs/nns/governance/api/Cargo.toml +++ b/rs/nns/governance/api/Cargo.toml @@ -17,6 +17,7 @@ comparable = { version = "0.5", features = ["derive"] } ic-base-types = { path = "../../../types/base_types" } ic-crypto-sha2 = { path = "../../../crypto/sha2" } ic-nervous-system-clients = { path = "../../../nervous_system/clients" } +ic-nervous-system-common-validation = { path = "../../../nervous_system/common/validation" } ic-nervous-system-proto = { path = "../../../nervous_system/proto" } ic-nns-common = { path = "../../common" } ic-protobuf = { path = "../../../protobuf" } diff --git a/rs/nns/governance/api/src/lib.rs b/rs/nns/governance/api/src/lib.rs index 33b33b1084d..1d8adeafcc0 100644 --- a/rs/nns/governance/api/src/lib.rs +++ b/rs/nns/governance/api/src/lib.rs @@ -1,5 +1,6 @@ pub mod bitcoin; pub mod pb; -pub mod proposal_helpers; +pub mod proposal_submission_helpers; +pub mod proposal_validation; pub mod subnet_rental; pub mod test_api; diff --git a/rs/nns/governance/api/src/proposal_helpers.rs b/rs/nns/governance/api/src/proposal_submission_helpers.rs similarity index 100% rename from rs/nns/governance/api/src/proposal_helpers.rs rename to rs/nns/governance/api/src/proposal_submission_helpers.rs diff --git a/rs/nns/governance/api/src/proposal_validation.rs b/rs/nns/governance/api/src/proposal_validation.rs new file mode 100644 index 00000000000..395558ef007 --- /dev/null +++ b/rs/nns/governance/api/src/proposal_validation.rs @@ -0,0 +1,80 @@ +use crate::pb::v1::Proposal; + +// The limits on NNS proposal title len (in bytes). +const PROPOSAL_TITLE_BYTES_MIN: usize = 5; +const PROPOSAL_TITLE_BYTES_MAX: usize = 256; +// Proposal validation +// 30000 B +const PROPOSAL_SUMMARY_BYTES_MAX: usize = 30000; +// 2048 characters +const PROPOSAL_URL_CHAR_MAX: usize = 2048; +// 10 characters +const PROPOSAL_URL_CHAR_MIN: usize = 10; + +/// Validates the user submitted proposal fields. +pub fn validate_user_submitted_proposal_fields(proposal: &Proposal) -> Result<(), String> { + validate_proposal_title(&proposal.title)?; + validate_proposal_summary(&proposal.summary)?; + validate_proposal_url(&proposal.url)?; + + Ok(()) +} + +/// Returns whether the following requirements are met: +/// 1. proposal must have a title. +/// 2. title len (bytes, not characters) is between min and max. +pub fn validate_proposal_title(title: &Option) -> Result<(), String> { + // Require that proposal has a title. + let len = title.as_ref().ok_or("Proposal lacks a title")?.len(); + + // Require that title is not too short. + if len < PROPOSAL_TITLE_BYTES_MIN { + return Err(format!( + "Proposal title is too short (must be at least {} bytes)", + PROPOSAL_TITLE_BYTES_MIN, + )); + } + + // Require that title is not too long. + if len > PROPOSAL_TITLE_BYTES_MAX { + return Err(format!( + "Proposal title is too long (can be at most {} bytes)", + PROPOSAL_TITLE_BYTES_MAX, + )); + } + + Ok(()) +} + +/// Returns whether the following requirements are met: +/// 1. summary len (bytes, not characters) is below the max. 
+pub fn validate_proposal_summary(summary: &str) -> Result<(), String> { + if summary.len() > PROPOSAL_SUMMARY_BYTES_MAX { + return Err(format!( + "The maximum proposal summary size is {} bytes, this proposal is: {} bytes", + PROPOSAL_SUMMARY_BYTES_MAX, + summary.len(), + )); + } + + Ok(()) +} + +/// Returns whether the following requirements are met: +/// 1. If a url is provided, it is between the max and min +/// 2. If a url is specified, it must be from the list of allowed domains. +pub fn validate_proposal_url(url: &str) -> Result<(), String> { + // An empty string will fail validation as it is not a valid url, + // but it's fine for us. + if !url.is_empty() { + ic_nervous_system_common_validation::validate_proposal_url( + url, + PROPOSAL_URL_CHAR_MIN, + PROPOSAL_URL_CHAR_MAX, + "Proposal url", + Some(vec!["forum.dfinity.org"]), + )? + } + + Ok(()) +} diff --git a/rs/nns/governance/src/governance.rs b/rs/nns/governance/src/governance.rs index 981a828300a..9f36d5c4411 100644 --- a/rs/nns/governance/src/governance.rs +++ b/rs/nns/governance/src/governance.rs @@ -88,7 +88,10 @@ use ic_nns_constants::{ LIFELINE_CANISTER_ID, REGISTRY_CANISTER_ID, ROOT_CANISTER_ID, SNS_WASM_CANISTER_ID, SUBNET_RENTAL_CANISTER_ID, }; -use ic_nns_governance_api::subnet_rental::SubnetRentalRequest; +use ic_nns_governance_api::{ + pb::v1::CreateServiceNervousSystem as ApiCreateServiceNervousSystem, proposal_validation, + subnet_rental::SubnetRentalRequest, +}; use ic_protobuf::registry::dc::v1::AddOrRemoveDataCentersProposalPayload; use ic_sns_init::pb::v1::SnsInitPayload; use ic_sns_swap::pb::v1::{self as sns_swap_pb, Lifecycle, NeuronsFundParticipationConstraints}; @@ -124,16 +127,6 @@ pub mod test_data; #[cfg(test)] mod tests; -// The limits on NNS proposal title len (in bytes). -const PROPOSAL_TITLE_BYTES_MIN: usize = 5; -const PROPOSAL_TITLE_BYTES_MAX: usize = 256; -// Proposal validation -// 30000 B -const PROPOSAL_SUMMARY_BYTES_MAX: usize = 30000; -// 2048 characters -const PROPOSAL_URL_CHAR_MAX: usize = 2048; -// 10 characters -const PROPOSAL_URL_CHAR_MIN: usize = 10; // 70 KB (for executing NNS functions that are not canister upgrades) const PROPOSAL_EXECUTE_NNS_FUNCTION_PAYLOAD_BYTES_MAX: usize = 70000; @@ -4726,7 +4719,9 @@ impl Governance { ) }?; - let sns_init_payload = SnsInitPayload::try_from(create_service_nervous_system)?; + let sns_init_payload = SnsInitPayload::try_from(ApiCreateServiceNervousSystem::from( + create_service_nervous_system, + ))?; Ok(SnsInitPayload { neurons_fund_participation_constraints, @@ -4968,7 +4963,9 @@ impl Governance { Err(format!("Topic not specified. proposal: {:#?}", proposal))?; } - validate_user_submitted_proposal_fields(proposal)?; + proposal_validation::validate_user_submitted_proposal_fields( + &ic_nns_governance_api::pb::v1::Proposal::from(proposal.clone()), + )?; if !proposal.allowed_when_resources_are_low() { self.check_heap_can_grow()?; @@ -5177,7 +5174,9 @@ impl Governance { create_service_nervous_system: &CreateServiceNervousSystem, ) -> Result<(), GovernanceError> { // Must be able to convert to a valid SnsInitPayload. 
- let conversion_result = SnsInitPayload::try_from(create_service_nervous_system.clone()); + let conversion_result = SnsInitPayload::try_from(ApiCreateServiceNervousSystem::from( + create_service_nervous_system.clone(), + )); if let Err(err) = conversion_result { return Err(GovernanceError::new_with_message( ErrorType::InvalidProposal, @@ -8024,74 +8023,6 @@ async fn call_deploy_new_sns( } } -/// Validates the user submitted proposal fields. -pub fn validate_user_submitted_proposal_fields(proposal: &Proposal) -> Result<(), String> { - validate_proposal_title(&proposal.title)?; - validate_proposal_summary(&proposal.summary)?; - validate_proposal_url(&proposal.url)?; - - Ok(()) -} - -/// Returns whether the following requirements are met: -/// 1. proposal must have a title. -/// 2. title len (bytes, not characters) is between min and max. -pub fn validate_proposal_title(title: &Option) -> Result<(), String> { - // Require that proposal has a title. - let len = title.as_ref().ok_or("Proposal lacks a title")?.len(); - - // Require that title is not too short. - if len < PROPOSAL_TITLE_BYTES_MIN { - return Err(format!( - "Proposal title is too short (must be at least {} bytes)", - PROPOSAL_TITLE_BYTES_MIN, - )); - } - - // Require that title is not too long. - if len > PROPOSAL_TITLE_BYTES_MAX { - return Err(format!( - "Proposal title is too long (can be at most {} bytes)", - PROPOSAL_TITLE_BYTES_MAX, - )); - } - - Ok(()) -} - -/// Returns whether the following requirements are met: -/// 1. summary len (bytes, not characters) is below the max. -pub fn validate_proposal_summary(summary: &str) -> Result<(), String> { - if summary.len() > PROPOSAL_SUMMARY_BYTES_MAX { - return Err(format!( - "The maximum proposal summary size is {} bytes, this proposal is: {} bytes", - PROPOSAL_SUMMARY_BYTES_MAX, - summary.len(), - )); - } - - Ok(()) -} - -/// Returns whether the following requirements are met: -/// 1. If a url is provided, it is between the max and min -/// 2. If a url is specified, it must be from the list of allowed domains. -pub fn validate_proposal_url(url: &str) -> Result<(), String> { - // An empty string will fail validation as it is not a valid url, - // but it's fine for us. - if !url.is_empty() { - ic_nervous_system_common::validate_proposal_url( - url, - PROPOSAL_URL_CHAR_MIN, - PROPOSAL_URL_CHAR_MAX, - "Proposal url", - Some(vec!["forum.dfinity.org"]), - )? - } - - Ok(()) -} - fn validate_motion(motion: &Motion) -> Result<(), GovernanceError> { if motion.motion_text.len() > PROPOSAL_MOTION_TEXT_BYTES_MAX { return Err(GovernanceError::new_with_message( diff --git a/rs/nns/governance/src/governance/tests/mod.rs b/rs/nns/governance/src/governance/tests/mod.rs index 125ad78de31..c6ea2428622 100644 --- a/rs/nns/governance/src/governance/tests/mod.rs +++ b/rs/nns/governance/src/governance/tests/mod.rs @@ -13,6 +13,8 @@ use ic_nervous_system_common::{assert_is_err, assert_is_ok, E8}; #[cfg(feature = "test")] use ic_nervous_system_proto::pb::v1::GlobalTimeOfDay; use ic_nns_common::pb::v1::NeuronId; +#[cfg(feature = "test")] +use ic_nns_governance_api::pb::v1::CreateServiceNervousSystem as ApiCreateServiceNervousSystem; use ic_protobuf::registry::dc::v1::DataCenterRecord; #[cfg(feature = "test")] use ic_sns_init::pb::v1::SnsInitPayload; @@ -248,9 +250,10 @@ mod convert_from_create_service_nervous_system_to_sns_init_payload_tests { // Step 1: Prepare the world. (In this case, trivial.) // Step 2: Call the code under test. 
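Note the division of labor in the calls below: `From` (internal type to API type) is infallible re-shaping, while `TryFrom` (API type to `SnsInitPayload`) is where validation can fail. A hypothetical miniature of the same layering:

struct Internal { value: Option<u64> }
struct Api { value: Option<u64> }
struct Payload { value: u64 }

impl From<Internal> for Api {
    fn from(src: Internal) -> Self {
        Api { value: src.value } // never fails
    }
}

impl TryFrom<Api> for Payload {
    type Error = String;
    fn try_from(src: Api) -> Result<Self, String> {
        let value = src.value.ok_or_else(|| "value must be set".to_string())?;
        Ok(Payload { value }) // validation lives on this edge
    }
}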
- let converted = - SnsInitPayload::try_from(CREATE_SERVICE_NERVOUS_SYSTEM_WITH_MATCHED_FUNDING.clone()) - .unwrap(); + let converted = SnsInitPayload::try_from(ApiCreateServiceNervousSystem::from( + CREATE_SERVICE_NERVOUS_SYSTEM_WITH_MATCHED_FUNDING.clone(), + )) + .unwrap(); // Step 3: Inspect the result. @@ -498,7 +501,7 @@ mod convert_from_create_service_nervous_system_to_sns_init_payload_tests { }); // Step 2: Call the code under test. - let converted = SnsInitPayload::try_from(original); + let converted = SnsInitPayload::try_from(ApiCreateServiceNervousSystem::from(original)); // Step 3: Inspect the result: Err must contain "wait for quiet". match converted { @@ -576,7 +579,10 @@ mod convert_create_service_nervous_system_proposal_to_sns_init_payload_tests_wit .expect("Cannot compute swap_start_timestamp_seconds, swap_due_timestamp_seconds.") }; - let sns_init_payload = SnsInitPayload::try_from(create_service_nervous_system).unwrap(); + let sns_init_payload = SnsInitPayload::try_from(ApiCreateServiceNervousSystem::from( + create_service_nervous_system, + )) + .unwrap(); SnsInitPayload { neurons_fund_participation_constraints: Some( diff --git a/rs/nns/governance/src/proposals/create_service_nervous_system.rs b/rs/nns/governance/src/proposals/create_service_nervous_system.rs index 37eb7a5cca0..0d79cadbc59 100644 --- a/rs/nns/governance/src/proposals/create_service_nervous_system.rs +++ b/rs/nns/governance/src/proposals/create_service_nervous_system.rs @@ -1,34 +1,7 @@ -use crate::pb::v1::{create_service_nervous_system, CreateServiceNervousSystem}; +use crate::pb::v1::CreateServiceNervousSystem; use ic_nervous_system_proto::pb::v1::{Duration, GlobalTimeOfDay}; -use ic_sns_init::pb::v1::{self as sns_init_pb, sns_init_payload, SnsInitPayload}; impl CreateServiceNervousSystem { - pub fn sns_token_e8s(&self) -> Option { - self.initial_token_distribution - .as_ref()? - .swap_distribution - .as_ref()? - .total - .as_ref()? - .e8s - } - - pub fn transaction_fee_e8s(&self) -> Option { - self.ledger_parameters - .as_ref()? - .transaction_fee - .as_ref()? - .e8s - } - - pub fn neuron_minimum_stake_e8s(&self) -> Option { - self.governance_parameters - .as_ref()? - .neuron_minimum_stake - .as_ref()? - .e8s - } - /// Computes timestamps for when the SNS token swap will start, and will be /// due, based on the start and end times. /// @@ -51,457 +24,3 @@ impl CreateServiceNervousSystem { ) } } - -fn divide_perfectly(field_name: &str, dividend: u64, divisor: u64) -> Result { - match dividend.checked_rem(divisor) { - None => Err(format!( - "Attempted to divide by zero while validating {}. 
\ - (This is likely due to an internal bug.)", - field_name, - )), - - Some(0) => Ok(dividend.saturating_div(divisor)), - - Some(remainder) => { - assert_ne!(remainder, 0); - Err(format!( - "{} is supposed to contain a value that is evenly divisible by {}, \ - but it contains {}, which leaves a remainder of {}.", - field_name, divisor, dividend, remainder, - )) - } - } -} - -impl TryFrom for SnsInitPayload { - type Error = String; - - fn try_from(src: CreateServiceNervousSystem) -> Result { - let CreateServiceNervousSystem { - name, - description, - url, - logo, - fallback_controller_principal_ids, - dapp_canisters, - - initial_token_distribution, - - swap_parameters, - ledger_parameters, - governance_parameters, - } = src; - - let mut defects = vec![]; - - let ledger_parameters = ledger_parameters.unwrap_or_default(); - let governance_parameters = governance_parameters.unwrap_or_default(); - let swap_parameters = swap_parameters.unwrap_or_default(); - - let create_service_nervous_system::LedgerParameters { - transaction_fee, - token_name, - token_symbol, - token_logo, - } = ledger_parameters; - - let transaction_fee_e8s = transaction_fee.and_then(|tokens| tokens.e8s); - - let token_logo = token_logo.and_then(|image| image.base64_encoding); - - let proposal_reject_cost_e8s = governance_parameters - .proposal_rejection_fee - .and_then(|tokens| tokens.e8s); - - let neuron_minimum_stake_e8s = governance_parameters - .neuron_minimum_stake - .and_then(|tokens| tokens.e8s); - - let initial_token_distribution = match sns_init_payload::InitialTokenDistribution::try_from( - initial_token_distribution.unwrap_or_default(), - ) { - Ok(ok) => Some(ok), - Err(err) => { - defects.push(err); - None - } - }; - - let fallback_controller_principal_ids = fallback_controller_principal_ids - .iter() - .map(|principal_id| principal_id.to_string()) - .collect(); - - let logo = logo.and_then(|image| image.base64_encoding); - // url, name, and description need no conversion. 
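For reference, the contract of divide_perfectly defined just above, in hypothetical assert form:

assert_eq!(divide_perfectly("bonus", 300, 100), Ok(3)); // divides evenly
assert!(divide_perfectly("bonus", 250, 100).is_err()); // remainder 50 becomes a defect
assert!(divide_perfectly("bonus", 300, 0).is_err()); // checked_rem by zero yields None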
- - let neuron_minimum_dissolve_delay_to_vote_seconds = governance_parameters - .neuron_minimum_dissolve_delay_to_vote - .and_then(|duration| duration.seconds); - - let voting_reward_parameters = governance_parameters - .voting_reward_parameters - .unwrap_or_default(); - - let initial_reward_rate_basis_points = voting_reward_parameters - .initial_reward_rate - .and_then(|percentage| percentage.basis_points); - let final_reward_rate_basis_points = voting_reward_parameters - .final_reward_rate - .and_then(|percentage| percentage.basis_points); - - let reward_rate_transition_duration_seconds = voting_reward_parameters - .reward_rate_transition_duration - .and_then(|duration| duration.seconds); - - let max_dissolve_delay_seconds = governance_parameters - .neuron_maximum_dissolve_delay - .and_then(|duration| duration.seconds); - - let max_neuron_age_seconds_for_age_bonus = governance_parameters - .neuron_maximum_age_for_age_bonus - .and_then(|duration| duration.seconds); - - let mut basis_points_to_percentage = - |field_name, percentage: ic_nervous_system_proto::pb::v1::Percentage| -> u64 { - let basis_points = percentage.basis_points.unwrap_or_default(); - match divide_perfectly(field_name, basis_points, 100) { - Ok(ok) => ok, - Err(err) => { - defects.push(err); - basis_points.saturating_div(100) - } - } - }; - - let max_dissolve_delay_bonus_percentage = governance_parameters - .neuron_maximum_dissolve_delay_bonus - .map(|percentage| { - basis_points_to_percentage( - "governance_parameters.neuron_maximum_dissolve_delay_bonus", - percentage, - ) - }); - - let max_age_bonus_percentage = - governance_parameters - .neuron_maximum_age_bonus - .map(|percentage| { - basis_points_to_percentage( - "governance_parameters.neuron_maximum_age_bonus", - percentage, - ) - }); - - let initial_voting_period_seconds = governance_parameters - .proposal_initial_voting_period - .and_then(|duration| duration.seconds); - - let wait_for_quiet_deadline_increase_seconds = governance_parameters - .proposal_wait_for_quiet_deadline_increase - .and_then(|duration| duration.seconds); - - let dapp_canisters = Some(sns_init_pb::DappCanisters { - canisters: dapp_canisters, - }); - - let confirmation_text = swap_parameters.confirmation_text; - - let restricted_countries = swap_parameters.restricted_countries; - - let min_participants = swap_parameters.minimum_participants; - - let min_direct_participation_icp_e8s = swap_parameters - .minimum_direct_participation_icp - .and_then(|tokens| tokens.e8s); - - let max_direct_participation_icp_e8s = swap_parameters - .maximum_direct_participation_icp - .and_then(|tokens| tokens.e8s); - - // Check if the deprecated fields are set. 
- if let Some(neurons_fund_investment_icp) = swap_parameters.neurons_fund_investment_icp { - defects.push(format!( - "neurons_fund_investment_icp ({:?}) is deprecated; please set \ - neurons_fund_participation instead.", - neurons_fund_investment_icp, - )); - } - if let Some(minimum_icp) = swap_parameters.minimum_icp { - defects.push(format!( - "minimum_icp ({:?}) is deprecated; please set \ - min_direct_participation_icp_e8s instead.", - minimum_icp, - )); - }; - if let Some(maximum_icp) = swap_parameters.maximum_icp { - defects.push(format!( - "maximum_icp ({:?}) is deprecated; please set \ - max_direct_participation_icp_e8s instead.", - maximum_icp, - )); - }; - - let neurons_fund_participation = swap_parameters.neurons_fund_participation; - - let min_participant_icp_e8s = swap_parameters - .minimum_participant_icp - .and_then(|tokens| tokens.e8s); - - let max_participant_icp_e8s = swap_parameters - .maximum_participant_icp - .and_then(|tokens| tokens.e8s); - - let neuron_basket_construction_parameters = swap_parameters - .neuron_basket_construction_parameters - .map( - |basket| ic_sns_swap::pb::v1::NeuronBasketConstructionParameters { - count: basket.count.unwrap_or_default(), - dissolve_delay_interval_seconds: basket - .dissolve_delay_interval - .map(|duration| duration.seconds.unwrap_or_default()) - .unwrap_or_default(), - }, - ); - - if !defects.is_empty() { - return Err(format!( - "Failed to convert CreateServiceNervousSystem proposal to SnsInitPayload:\n{}", - defects.join("\n"), - )); - } - - let result = Self { - transaction_fee_e8s, - token_name, - token_symbol, - proposal_reject_cost_e8s, - neuron_minimum_stake_e8s, - initial_token_distribution, - fallback_controller_principal_ids, - logo, - url, - name, - description, - neuron_minimum_dissolve_delay_to_vote_seconds, - initial_reward_rate_basis_points, - final_reward_rate_basis_points, - reward_rate_transition_duration_seconds, - max_dissolve_delay_seconds, - max_neuron_age_seconds_for_age_bonus, - max_dissolve_delay_bonus_percentage, - max_age_bonus_percentage, - initial_voting_period_seconds, - wait_for_quiet_deadline_increase_seconds, - dapp_canisters, - min_participants, - min_direct_participation_icp_e8s, - max_direct_participation_icp_e8s, - min_participant_icp_e8s, - max_participant_icp_e8s, - neuron_basket_construction_parameters, - confirmation_text, - restricted_countries, - token_logo, - neurons_fund_participation, - - // These are not known from only the CreateServiceNervousSystem - // proposal. See `Governance::make_sns_init_payload`. - nns_proposal_id: None, - swap_start_timestamp_seconds: None, - swap_due_timestamp_seconds: None, - neurons_fund_participation_constraints: None, - - // Deprecated fields should be set to `None`. 
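The conversion collects every problem into `defects` and fails once at the end, rather than aborting on the first error; a minimal self-contained sketch of the same style, with hypothetical field names:

fn convert(deprecated_field: Option<u64>, new_field: Option<u64>) -> Result<u64, String> {
    let mut defects: Vec<String> = vec![];
    if deprecated_field.is_some() {
        defects.push("deprecated_field is deprecated; set new_field instead.".to_string());
    }
    if new_field.is_none() {
        defects.push("new_field must be set.".to_string());
    }
    if !defects.is_empty() {
        return Err(defects.join("\n")); // one Err carrying every problem
    }
    Ok(new_field.unwrap())
}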
- min_icp_e8s: None, - max_icp_e8s: None, - }; - - result.validate_pre_execution()?; - - Ok(result) - } -} - -impl TryFrom - for sns_init_payload::InitialTokenDistribution -{ - type Error = String; - - fn try_from( - src: create_service_nervous_system::InitialTokenDistribution, - ) -> Result { - let create_service_nervous_system::InitialTokenDistribution { - developer_distribution, - treasury_distribution, - swap_distribution, - } = src; - - let mut defects = vec![]; - - let developer_distribution = match sns_init_pb::DeveloperDistribution::try_from( - developer_distribution.unwrap_or_default(), - ) { - Ok(ok) => Some(ok), - Err(err) => { - defects.push(err); - None - } - }; - - let treasury_distribution = match sns_init_pb::TreasuryDistribution::try_from( - treasury_distribution.unwrap_or_default(), - ) { - Ok(ok) => Some(ok), - Err(err) => { - defects.push(err); - None - } - }; - - let swap_distribution = - match sns_init_pb::SwapDistribution::try_from(swap_distribution.unwrap_or_default()) { - Ok(ok) => Some(ok), - Err(err) => { - defects.push(err); - None - } - }; - - let airdrop_distribution = Some(sns_init_pb::AirdropDistribution::default()); - - if !defects.is_empty() { - return Err(format!( - "Failed to convert to InitialTokenDistribution for the following reasons:\n{}", - defects.join("\n"), - )); - } - - Ok(Self::FractionalDeveloperVotingPower( - sns_init_pb::FractionalDeveloperVotingPower { - developer_distribution, - treasury_distribution, - swap_distribution, - airdrop_distribution, - }, - )) - } -} - -impl TryFrom - for sns_init_pb::SwapDistribution -{ - type Error = String; - - fn try_from( - src: create_service_nervous_system::initial_token_distribution::SwapDistribution, - ) -> Result { - let create_service_nervous_system::initial_token_distribution::SwapDistribution { total } = - src; - - let total_e8s = total.unwrap_or_default().e8s.unwrap_or_default(); - let initial_swap_amount_e8s = total_e8s; - - Ok(Self { - initial_swap_amount_e8s, - total_e8s, - }) - } -} - -impl TryFrom - for sns_init_pb::TreasuryDistribution -{ - type Error = String; - - fn try_from( - src: create_service_nervous_system::initial_token_distribution::TreasuryDistribution, - ) -> Result { - let create_service_nervous_system::initial_token_distribution::TreasuryDistribution { - total, - } = src; - - let total_e8s = total.unwrap_or_default().e8s.unwrap_or_default(); - - Ok(Self { total_e8s }) - } -} - -impl TryFrom - for sns_init_pb::DeveloperDistribution -{ - type Error = String; - - fn try_from( - src: create_service_nervous_system::initial_token_distribution::DeveloperDistribution, - ) -> Result { - let create_service_nervous_system::initial_token_distribution::DeveloperDistribution { - developer_neurons, - } = src; - - let mut defects = vec![]; - - let developer_neurons = - developer_neurons - .into_iter() - .enumerate() - .filter_map(|(i, neuron_distribution)| { - match sns_init_pb::NeuronDistribution::try_from(neuron_distribution) { - Ok(ok) => Some(ok), - Err(err) => { - defects.push(format!( - "Failed to convert element at index {} in field \ - `developer_neurons`: {}", - i, err, - )); - None - } - } - }) - .collect(); - - if !defects.is_empty() { - return Err(format!( - "Failed to convert to DeveloperDistribution for SnsInitPayload: {}", - defects.join("\n"), - )); - } - - Ok(Self { developer_neurons }) - } -} - -impl TryFrom -for sns_init_pb::NeuronDistribution -{ - type Error = String; - - fn try_from( - src: 
create_service_nervous_system::initial_token_distribution::developer_distribution::NeuronDistribution, - ) -> Result { - let create_service_nervous_system::initial_token_distribution::developer_distribution::NeuronDistribution { - controller, - dissolve_delay, - memo, - stake, - vesting_period, - } = src; - - // controller needs no conversion - let stake_e8s = stake.unwrap_or_default().e8s.unwrap_or_default(); - let memo = memo.unwrap_or_default(); - let dissolve_delay_seconds = dissolve_delay - .unwrap_or_default() - .seconds - .unwrap_or_default(); - let vesting_period_seconds = vesting_period.unwrap_or_default().seconds; - - Ok(Self { - controller, - stake_e8s, - memo, - dissolve_delay_seconds, - vesting_period_seconds, - }) - } -} diff --git a/rs/nns/governance/tests/governance.rs b/rs/nns/governance/tests/governance.rs index 2f8cbe1fcb9..ba533b6efa3 100644 --- a/rs/nns/governance/tests/governance.rs +++ b/rs/nns/governance/tests/governance.rs @@ -47,7 +47,7 @@ use ic_nns_governance::{ test_data::{ CREATE_SERVICE_NERVOUS_SYSTEM, CREATE_SERVICE_NERVOUS_SYSTEM_WITH_MATCHED_FUNDING, }, - validate_proposal_title, Environment, Governance, HeapGrowthPotential, + Environment, Governance, HeapGrowthPotential, EXECUTE_NNS_FUNCTION_PAYLOAD_LISTING_BYTES_MAX, MAX_DISSOLVE_DELAY_SECONDS, MAX_NEURON_AGE_FOR_AGE_BONUS, MAX_NUMBER_OF_PROPOSALS_WITH_BALLOTS, MIN_DISSOLVE_DELAY_FOR_VOTE_ELIGIBILITY_SECONDS, PROPOSAL_MOTION_TEXT_BYTES_MAX, @@ -95,6 +95,10 @@ use ic_nns_governance::{ temporarily_disable_private_neuron_enforcement, temporarily_disable_set_visibility_proposals, temporarily_enable_private_neuron_enforcement, temporarily_enable_set_visibility_proposals, }; +use ic_nns_governance_api::{ + pb::v1::CreateServiceNervousSystem as ApiCreateServiceNervousSystem, + proposal_validation::validate_proposal_title, +}; use ic_nns_governance_init::GovernanceCanisterInitPayloadBuilder; use ic_sns_init::pb::v1::SnsInitPayload; use ic_sns_root::{GetSnsCanistersSummaryRequest, GetSnsCanistersSummaryResponse}; @@ -11881,7 +11885,7 @@ lazy_static! { ) }; - let sns_init_payload = SnsInitPayload::try_from(create_service_nervous_system) + let sns_init_payload = SnsInitPayload::try_from(ApiCreateServiceNervousSystem::from(create_service_nervous_system)) .expect( "Cannot build SNS_INIT_PAYLOAD from \ CREATE_SERVICE_NERVOUS_SYSTEM_WITH_MATCHED_FUNDING." 
diff --git a/rs/nns/integration_tests/src/reset_root.rs b/rs/nns/integration_tests/src/reset_root.rs index fb6a1a30eb4..b7be4b7c53e 100644 --- a/rs/nns/integration_tests/src/reset_root.rs +++ b/rs/nns/integration_tests/src/reset_root.rs @@ -5,7 +5,7 @@ use ic_nns_common::pb::v1::NeuronId; use ic_nns_constants::{LIFELINE_CANISTER_ID, ROOT_CANISTER_ID}; use ic_nns_governance_api::{ pb::v1::{manage_neuron_response::Command, NnsFunction}, - proposal_helpers::create_external_update_proposal_candid, + proposal_submission_helpers::create_external_update_proposal_candid, }; use ic_nns_test_utils::{ common::{build_root_wasm, modify_wasm_bytes, NnsInitPayloadsBuilder}, diff --git a/rs/p2p/quic_transport/src/connection_handle.rs b/rs/p2p/quic_transport/src/connection_handle.rs index 744aace7935..9ac1e1c5023 100644 --- a/rs/p2p/quic_transport/src/connection_handle.rs +++ b/rs/p2p/quic_transport/src/connection_handle.rs @@ -236,7 +236,7 @@ mod tests { net::{Ipv4Addr, SocketAddr}, sync::Arc, }; - use tokio::sync::Notify; + use tokio::sync::Barrier; use turmoil::Builder; use crate::connection_handle::SendStreamDropGuard; @@ -256,10 +256,10 @@ mod tests { // If the sender closes the connection immediately after sending, then // quinn might abort transmitting the message for the sender. - // Thus we wait with closing the sender's quinn endpoint, and killing the connection, - // until the receiver has received the message. - let receiver_received_message = Arc::new(Notify::new()); - let receiver_received_message_clone = receiver_received_message.clone(); + // Thus we wait with closing the endpoints until all client simulations + // complete. + let client_completed = Arc::new(Barrier::new(2)); + let client_completed_clone = client_completed.clone(); sim.client(receiver, async move { let udp_listener = turmoil::net::UdpSocket::bind(node_addr).await.unwrap(); @@ -286,8 +286,6 @@ mod tests { .unwrap(); let server_result = recv_stream.read_to_end(MAX_READ_SIZE).await; - receiver_received_message_clone.notify_one(); - if stream_is_finished_and_stopped { assert_matches!( server_result, @@ -298,6 +296,8 @@ mod tests { Err(ReadToEndError::Read(ReadError::Reset { .. 
})) ); } + client_completed_clone.wait().await; + Ok(()) }); @@ -356,7 +356,7 @@ mod tests { }; drop(drop_guard); - receiver_received_message.notified().await; + client_completed.wait().await; Ok(()) }); diff --git a/rs/recovery/BUILD.bazel b/rs/recovery/BUILD.bazel index b46a02bb193..5846e144a4c 100644 --- a/rs/recovery/BUILD.bazel +++ b/rs/recovery/BUILD.bazel @@ -58,7 +58,6 @@ ALIASES = {} rust_library( name = "recovery", - testonly = True, srcs = glob(["src/**/*.rs"]), aliases = ALIASES, crate_name = "ic_recovery", @@ -74,7 +73,6 @@ rust_library( rust_binary( name = "ic-recovery", - testonly = True, srcs = ["src/main.rs"], aliases = ALIASES, proc_macro_deps = MACRO_DEPENDENCIES, diff --git a/rs/recovery/subnet_splitting/BUILD.bazel b/rs/recovery/subnet_splitting/BUILD.bazel index 759ab6d0fe1..5e1465a4da5 100644 --- a/rs/recovery/subnet_splitting/BUILD.bazel +++ b/rs/recovery/subnet_splitting/BUILD.bazel @@ -38,7 +38,6 @@ ALIASES = {} rust_library( name = "subnet_splitting", - testonly = True, srcs = glob( ["src/**/*.rs"], exclude = ["src/main.rs"], @@ -53,7 +52,6 @@ rust_library( rust_binary( name = "subnet-splitting-tool", - testonly = True, srcs = ["src/main.rs"], aliases = ALIASES, proc_macro_deps = MACRO_DEPENDENCIES, diff --git a/rs/registry/admin/BUILD.bazel b/rs/registry/admin/BUILD.bazel index 5c84b0f8bbd..b53b531e936 100644 --- a/rs/registry/admin/BUILD.bazel +++ b/rs/registry/admin/BUILD.bazel @@ -63,7 +63,6 @@ BASE_DEPENDENCIES = [ # dependencies (`DEPENDENCIES`), or `DEPENDENCIES_WITH_TEST_FEATURES` feature previews. DEPENDENCIES = BASE_DEPENDENCIES + [ "//rs/sns/init", - "//rs/nns/governance", "//rs/nns/governance/api", "//rs/nns/sns-wasm", "//rs/nns/handlers/root/impl:root", @@ -74,7 +73,6 @@ DEPENDENCIES = BASE_DEPENDENCIES + [ # (Currently not used) # DEPENDENCIES_WITH_TEST_FEATURES = BASE_DEPENDENCIES + [ # "//rs/sns/init:init--test_feature", -# "//rs/nns/governance:governance--test_feature", # "//rs/nns/sns-wasm:sns-wasm--test_feature", # "//rs/nns/handlers/root/impl:root--test_feature", # "//rs/sns/swap:swap--test_feature", diff --git a/rs/registry/admin/Cargo.toml b/rs/registry/admin/Cargo.toml index 5cb96489638..33d22692212 100644 --- a/rs/registry/admin/Cargo.toml +++ b/rs/registry/admin/Cargo.toml @@ -34,7 +34,6 @@ ic-nervous-system-proto = { path = "../../nervous_system/proto" } ic-nervous-system-root = { path = "../../nervous_system/root" } ic-nns-common = { path = "../../nns/common" } ic-nns-constants = { path = "../../nns/constants" } -ic-nns-governance = { path = "../../nns/governance" } ic-nns-governance-api = { path = "../../nns/governance/api" } ic-nns-handler-root = { path = "../../nns/handlers/root/impl" } ic-nns-init = { path = "../../nns/init" } diff --git a/rs/registry/admin/src/main.rs b/rs/registry/admin/src/main.rs index 13b249b4268..f1be163e340 100644 --- a/rs/registry/admin/src/main.rs +++ b/rs/registry/admin/src/main.rs @@ -62,7 +62,7 @@ use ic_nns_governance_api::{ NodeProvider, ProposalActionRequest, RewardNodeProviders, StopOrStartCanister, UpdateCanisterSettings, }, - proposal_helpers::{ + proposal_submission_helpers::{ create_external_update_proposal_candid, create_make_proposal_payload, decode_make_proposal_response, }, @@ -3071,12 +3071,10 @@ impl TryFrom for CreateServiceNervousSys governance_parameters, }; - let result = ic_nns_governance::pb::v1::CreateServiceNervousSystem::from(result); - // TODO migrate validation out of SnsInitPayload so we no longer have to support ic_nns_gov types SnsInitPayload::try_from(result.clone())?; - 
Ok(result.into()) + Ok(result) } } diff --git a/rs/replay/BUILD.bazel b/rs/replay/BUILD.bazel index 4cffecdb474..4306839d2d6 100644 --- a/rs/replay/BUILD.bazel +++ b/rs/replay/BUILD.bazel @@ -10,7 +10,6 @@ DEPENDENCIES = [ "//rs/consensus/utils", "//rs/crypto", "//rs/crypto/for_verification_only", - "//rs/crypto/test_utils/ni-dkg", "//rs/cycles_account_manager", "//rs/execution_environment", "//rs/interfaces", @@ -62,7 +61,6 @@ ALIASES = {} rust_library( name = "replay", - testonly = True, srcs = glob(["src/**"]), aliases = ALIASES, crate_name = "ic_replay", @@ -77,7 +75,6 @@ rust_library( rust_binary( name = "ic-replay", - testonly = True, srcs = ["src/main.rs"], aliases = ALIASES, proc_macro_deps = MACRO_DEPENDENCIES, diff --git a/rs/replay/Cargo.toml b/rs/replay/Cargo.toml index 998acab9b0d..42d0fb9a3c0 100644 --- a/rs/replay/Cargo.toml +++ b/rs/replay/Cargo.toml @@ -17,7 +17,6 @@ ic-config = { path = "../config" } ic-consensus = { path = "../consensus" } ic-consensus-utils = { path = "../consensus/utils" } ic-crypto-for-verification-only = { path = "../crypto/for_verification_only" } -ic-crypto-test-utils-ni-dkg = { path = "../crypto/test_utils/ni-dkg" } ic-cycles-account-manager = { path = "../cycles_account_manager" } ic-execution-environment = { path = "../execution_environment" } ic-interfaces = { path = "../interfaces" } diff --git a/rs/replay/src/player.rs b/rs/replay/src/player.rs index 168221c516a..1499c9ba73c 100644 --- a/rs/replay/src/player.rs +++ b/rs/replay/src/player.rs @@ -15,9 +15,6 @@ use ic_consensus_utils::{ pool_reader::PoolReader, }; use ic_crypto_for_verification_only::CryptoComponentForVerificationOnly; -use ic_crypto_test_utils_ni_dkg::{ - dummy_initial_dkg_transcript_with_master_key, sign_message, SecretKeyBytes, -}; use ic_cycles_account_manager::CyclesAccountManager; use ic_execution_environment::ExecutionServices; use ic_interfaces::{ @@ -60,7 +57,7 @@ use ic_types::{ }, crypto::{ threshold_sig::ni_dkg::{NiDkgId, NiDkgTag, NiDkgTargetSubnet}, - CombinedThresholdSig, CombinedThresholdSigOf, Signable, Signed, + CombinedThresholdSig, CombinedThresholdSigOf, Signed, }, ingress::{IngressState, IngressStatus, WasmResult}, malicious_flags::MaliciousFlags, @@ -70,7 +67,6 @@ use ic_types::{ CryptoHashOfPartialState, CryptoHashOfState, Height, NodeId, PrincipalId, Randomness, RegistryVersion, ReplicaVersion, SubnetId, Time, UserId, }; -use rand::{rngs::StdRng, SeedableRng}; use serde::{Deserialize, Serialize}; use slog_async::AsyncGuard; use std::{ @@ -828,37 +824,22 @@ impl Player { } fn certify_state_with_dummy_certification(&self) { - let (_ni_dkg_transcript, secret_key) = - dummy_initial_dkg_transcript_with_master_key(&mut StdRng::seed_from_u64(42)); if self.state_manager.latest_state_height() > self.state_manager.latest_certified_height() { let state_hashes = self.state_manager.list_state_hashes_to_certify(); let (height, hash) = state_hashes .last() .expect("There should be at least one state hash to certify"); self.state_manager - .deliver_state_certification(Self::certify_hash( - &secret_key, - self.subnet_id, - height, - hash, - )); + .deliver_state_certification(Self::certify_hash(self.subnet_id, height, hash)); } } fn certify_hash( - secret_key: &SecretKeyBytes, subnet_id: SubnetId, height: &Height, hash: &CryptoHashOfPartialState, ) -> Certification { - let signature = sign_message( - CertificationContent::new(hash.clone()) - .as_signed_bytes() - .as_slice(), - secret_key, - ); - let combined_sig = - 
CombinedThresholdSigOf::from(CombinedThresholdSig(signature.as_ref().to_vec())); + let combined_sig = CombinedThresholdSigOf::from(CombinedThresholdSig(vec![])); Certification { height: *height, signed: Signed { diff --git a/rs/sns/cli/BUILD.bazel b/rs/sns/cli/BUILD.bazel index f0e16f3ead0..ba44cc53e85 100644 --- a/rs/sns/cli/BUILD.bazel +++ b/rs/sns/cli/BUILD.bazel @@ -36,14 +36,14 @@ BASE_DEPENDENCIES = [ # Each target declared in this file may choose either these (release-ready) # dependencies (`DEPENDENCIES`), or `DEPENDENCIES_WITH_TEST_FEATURES` feature previews. DEPENDENCIES = BASE_DEPENDENCIES + [ - "//rs/nns/governance", + "//rs/nns/governance/api", "//rs/nns/sns-wasm", "//rs/sns/governance", "//rs/sns/init", ] DEPENDENCIES_WITH_TEST_FEATURES = BASE_DEPENDENCIES + [ - "//rs/nns/governance:governance--test_feature", + "//rs/nns/governance/api:api--test_feature", "//rs/nns/sns-wasm:sns-wasm--test_feature", "//rs/sns/governance:governance--test_feature", "//rs/sns/init:init--test_feature", diff --git a/rs/sns/cli/Cargo.toml b/rs/sns/cli/Cargo.toml index 24b2ef0198d..6df0cf7bd47 100644 --- a/rs/sns/cli/Cargo.toml +++ b/rs/sns/cli/Cargo.toml @@ -30,7 +30,7 @@ ic-nervous-system-humanize = { path = "../../nervous_system/humanize" } ic-nervous-system-proto = { path = "../../nervous_system/proto" } ic-nns-common = { path = "../../nns/common" } ic-nns-constants = { path = "../../nns/constants" } -ic-nns-governance = { path = "../../nns/governance" } +ic-nns-governance-api = { path = "../../nns/governance/api" } ic-sns-governance = { path = "../governance" } ic-sns-init = { path = "../init" } ic-sns-root = { path = "../root" } diff --git a/rs/sns/cli/src/init_config_file/friendly.rs b/rs/sns/cli/src/init_config_file/friendly.rs index 9a15db8bdc4..cdcfd9056a7 100644 --- a/rs/sns/cli/src/init_config_file/friendly.rs +++ b/rs/sns/cli/src/init_config_file/friendly.rs @@ -1,9 +1,9 @@ use anyhow::{anyhow, Result}; use ic_base_types::PrincipalId; use ic_nervous_system_proto::pb::v1 as nervous_system_pb; -use ic_nns_governance::{ - governance::validate_user_submitted_proposal_fields, +use ic_nns_governance_api::{ pb::v1::{proposal::Action, CreateServiceNervousSystem, Proposal}, + proposal_validation::validate_user_submitted_proposal_fields, }; use ic_sns_init::pb::v1::SnsInitPayload; use std::{ @@ -16,7 +16,7 @@ use std::{ // related types in this module, put these aliases in their own module to avoid // getting mixed up. 
mod nns_governance_pb { - pub use ic_nns_governance::pb::v1::create_service_nervous_system::{ + pub use ic_nns_governance_api::pb::v1::create_service_nervous_system::{ governance_parameters::VotingRewardParameters, initial_token_distribution::{ developer_distribution::NeuronDistribution, DeveloperDistribution, SwapDistribution, @@ -368,7 +368,8 @@ impl SnsConfigurationFile { )), }; - validate_user_submitted_proposal_fields(&proposal).map_err(|e| anyhow!("{}", e))?; + validate_user_submitted_proposal_fields(&(proposal.clone())) + .map_err(|e| anyhow!("{}", e))?; Ok(proposal) } diff --git a/rs/sns/cli/src/lib.rs b/rs/sns/cli/src/lib.rs index cb8f4dc9e30..5cf633484d5 100644 --- a/rs/sns/cli/src/lib.rs +++ b/rs/sns/cli/src/lib.rs @@ -11,7 +11,7 @@ use ic_base_types::PrincipalId; use ic_crypto_sha2::Sha256; use ic_nervous_system_common_test_keys::TEST_NEURON_1_OWNER_KEYPAIR; use ic_nns_constants::{GOVERNANCE_CANISTER_ID, SNS_WASM_CANISTER_ID}; -use ic_nns_governance::pb::v1::{ +use ic_nns_governance_api::pb::v1::{ manage_neuron::{self, NeuronIdOrSubaccount}, manage_neuron_response::{self, MakeProposalResponse}, ManageNeuron, ManageNeuronResponse, Proposal, @@ -160,7 +160,7 @@ pub(crate) fn generate_sns_init_payload(path: &Path) -> Result { fn read_create_service_nervous_system_from_init_yaml( path: &Path, -) -> Result { +) -> Result { let contents = std::fs::read_to_string(path).context(format!("Unable to read {path:?}"))?; let configuration = serde_yaml::from_str::(&contents) diff --git a/rs/sns/cli/src/propose.rs b/rs/sns/cli/src/propose.rs index 132a79ca28f..498ae8cf3a1 100644 --- a/rs/sns/cli/src/propose.rs +++ b/rs/sns/cli/src/propose.rs @@ -9,7 +9,9 @@ use ic_nervous_system_common::ledger::compute_neuron_staking_subaccount_bytes; use ic_nervous_system_common_test_keys::TEST_NEURON_1_ID; use ic_nns_common::pb::v1::{NeuronId, ProposalId}; use ic_nns_constants::ROOT_CANISTER_ID; -use ic_nns_governance::pb::v1::{manage_neuron::NeuronIdOrSubaccount, proposal::Action, Proposal}; +use ic_nns_governance_api::pb::v1::{ + manage_neuron::NeuronIdOrSubaccount, proposal::Action, Proposal, +}; use ic_sns_governance::pb::v1::governance::Mode; use itertools::Itertools; use std::{ diff --git a/rs/sns/governance/BUILD.bazel b/rs/sns/governance/BUILD.bazel index 95ef4edc79d..709727fec1e 100644 --- a/rs/sns/governance/BUILD.bazel +++ b/rs/sns/governance/BUILD.bazel @@ -19,6 +19,7 @@ DEPENDENCIES = [ "//rs/nervous_system/clients", "//rs/nervous_system/collections/union_multi_map", "//rs/nervous_system/common", + "//rs/nervous_system/common/validation", "//rs/nervous_system/governance", "//rs/nervous_system/lock", "//rs/nervous_system/proto", diff --git a/rs/sns/governance/Cargo.toml b/rs/sns/governance/Cargo.toml index b36a509de5c..5cd8b438668 100644 --- a/rs/sns/governance/Cargo.toml +++ b/rs/sns/governance/Cargo.toml @@ -50,6 +50,7 @@ ic-nervous-system-canisters = { path = "../../nervous_system/canisters" } ic-nervous-system-clients = { path = "../../nervous_system/clients" } ic-nervous-system-collections-union-multi-map = { path = "../../nervous_system/collections/union_multi_map" } ic-nervous-system-common = { path = "../../nervous_system/common" } +ic-nervous-system-common-validation = { path = "../../nervous_system/common/validation" } ic-nervous-system-common-build-metadata = { path = "../../nervous_system/common/build_metadata" } ic-nervous-system-governance = { path = "../../nervous_system/governance" } ic-nervous-system-lock = { path = "../../nervous_system/lock" } diff --git 
a/rs/sns/governance/canister/governance.did b/rs/sns/governance/canister/governance.did index 6ac1061e143..71d97ba850b 100644 --- a/rs/sns/governance/canister/governance.did +++ b/rs/sns/governance/canister/governance.did @@ -283,6 +283,7 @@ type Governance = record { sns_metadata : opt ManageSnsMetadata; neurons : vec record { text; Neuron }; genesis_timestamp_seconds : nat64; + migrated_root_wasm_memory_limit : opt bool; }; type GovernanceCachedMetrics = record { diff --git a/rs/sns/governance/canister/governance_test.did b/rs/sns/governance/canister/governance_test.did index d005ebf1ea8..3dbb9aa5749 100644 --- a/rs/sns/governance/canister/governance_test.did +++ b/rs/sns/governance/canister/governance_test.did @@ -292,6 +292,7 @@ type Governance = record { sns_metadata : opt ManageSnsMetadata; neurons : vec record { text; Neuron }; genesis_timestamp_seconds : nat64; + migrated_root_wasm_memory_limit : opt bool; }; type GovernanceCachedMetrics = record { diff --git a/rs/sns/governance/proto/ic_sns_governance/pb/v1/governance.proto b/rs/sns/governance/proto/ic_sns_governance/pb/v1/governance.proto index 1b08f0a80a5..cd9e45e6780 100644 --- a/rs/sns/governance/proto/ic_sns_governance/pb/v1/governance.proto +++ b/rs/sns/governance/proto/ic_sns_governance/pb/v1/governance.proto @@ -1446,6 +1446,8 @@ message Governance { } MaturityModulation maturity_modulation = 26; + + optional bool migrated_root_wasm_memory_limit = 27; } // Request message for 'get_metadata'. diff --git a/rs/sns/governance/src/canister_control.rs b/rs/sns/governance/src/canister_control.rs index f11e0798505..ea39693a01b 100644 --- a/rs/sns/governance/src/canister_control.rs +++ b/rs/sns/governance/src/canister_control.rs @@ -1,3 +1,4 @@ +use crate::governance::Governance; use crate::{ governance::log_prefix, logs::{ERROR, INFO}, @@ -15,6 +16,7 @@ use ic_canister_log::log; use ic_nervous_system_clients::{ canister_id_record::CanisterIdRecord, canister_status::{CanisterStatusResultFromManagementCanister, CanisterStatusType}, + update_settings::{CanisterSettings, UpdateSettings}, }; use std::convert::TryFrom; @@ -320,3 +322,32 @@ pub async fn perform_execute_generic_nervous_system_function_call( } } } + +pub async fn update_root_canister_settings( + governance: &Governance, + settings: CanisterSettings, +) -> Result<(), GovernanceError> { + let update_settings_args = UpdateSettings { + canister_id: PrincipalId::from(governance.proto.root_canister_id_or_panic()), + settings, + // allowed to be None + sender_canister_version: None, + }; + governance + .env + .call_canister( + ic_management_canister_types::IC_00, + "update_settings", + Encode!(&update_settings_args).expect("Unable to encode update_settings args."), + ) + .await + .map(|_reply| ()) + .map_err(|err| { + let err = GovernanceError::new_with_message( + ErrorType::External, + format!("Failed to update settings of the root canister: {:?}", err), + ); + log!(ERROR, "{}{:?}", log_prefix(), err); + err + }) +} diff --git a/rs/sns/governance/src/gen/ic_sns_governance.pb.v1.rs b/rs/sns/governance/src/gen/ic_sns_governance.pb.v1.rs index e8270dfc68e..7938ea3cf5a 100644 --- a/rs/sns/governance/src/gen/ic_sns_governance.pb.v1.rs +++ b/rs/sns/governance/src/gen/ic_sns_governance.pb.v1.rs @@ -1491,6 +1491,8 @@ pub struct Governance { pub is_finalizing_disburse_maturity: ::core::option::Option, #[prost(message, optional, tag = "26")] pub maturity_modulation: ::core::option::Option, + #[prost(bool, optional, tag = "27")] + pub migrated_root_wasm_memory_limit: 
::core::option::Option<bool>,
 }
 /// Nested message and enum types in `Governance`.
 pub mod governance {
diff --git a/rs/sns/governance/src/governance.rs b/rs/sns/governance/src/governance.rs
index a6d9adbb3d6..a431b76c9bb 100644
--- a/rs/sns/governance/src/governance.rs
+++ b/rs/sns/governance/src/governance.rs
@@ -6,7 +6,7 @@ use crate::pb::v1::{
 use crate::{
     canister_control::{
         get_canister_id, perform_execute_generic_nervous_system_function_call,
-        upgrade_canister_directly,
+        update_root_canister_settings, upgrade_canister_directly,
     },
     logs::{ERROR, INFO},
     neuron::{
@@ -84,6 +84,7 @@ use ic_management_canister_types::{
     CanisterChangeDetails, CanisterInfoRequest, CanisterInfoResponse, CanisterInstallMode,
 };
 use ic_nervous_system_clients::ledger_client::ICRC1Ledger;
+use ic_nervous_system_clients::update_settings::CanisterSettings;
 use ic_nervous_system_collections_union_multi_map::UnionMultiMap;
 use ic_nervous_system_common::{
     cmc::CMC,
@@ -96,7 +97,10 @@ use ic_nervous_system_governance::maturity_modulation::{
 };
 use ic_nervous_system_lock::acquire;
 use ic_nervous_system_root::change_canister::ChangeCanisterRequest;
-use ic_nns_constants::LEDGER_CANISTER_ID as NNS_LEDGER_CANISTER_ID;
+use ic_nns_constants::{
+    DEFAULT_SNS_NON_GOVERNANCE_CANISTER_WASM_MEMORY_LIMIT,
+    LEDGER_CANISTER_ID as NNS_LEDGER_CANISTER_ID,
+};
 use ic_protobuf::types::v1::CanisterInstallMode as CanisterInstallModeProto;
 use ic_sns_governance_proposal_criticality::ProposalCriticality;
 use ic_sns_governance_token_valuation::Valuation;
@@ -4584,6 +4588,32 @@ impl Governance {
         true
     }
 
+    async fn maybe_migrate_root_wasm_memory_limit(&mut self) {
+        if self
+            .proto
+            .migrated_root_wasm_memory_limit
+            .unwrap_or_default()
+        {
+            return;
+        }
+
+        // Set migrated_root_wasm_memory_limit to Some(true) so this doesn't run again
+        self.proto.migrated_root_wasm_memory_limit = Some(true);
+
+        let settings = CanisterSettings {
+            wasm_memory_limit: Some(candid::Nat::from(
+                DEFAULT_SNS_NON_GOVERNANCE_CANISTER_WASM_MEMORY_LIMIT,
+            )),
+            ..Default::default()
+        };
+
+        // Set root settings
+        match update_root_canister_settings(self, settings).await {
+            Ok(_) => (),
+            Err(e) => log!(ERROR, "Failed to update root canister settings: {}", e),
+        }
+    }
+
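The method above is a one-shot migration guarded by a persisted flag: the flag flips before the call, so a failed call is logged once instead of being retried on every heartbeat. A minimal sketch of that shape, with hypothetical names and a stub standing in for the real call:

async fn do_migration() -> Result<(), String> { Ok(()) } // stand-in for the real call

async fn maybe_run_once(done: &mut Option<bool>) {
    if done.unwrap_or_default() {
        return; // already ran (or was already marked as run)
    }
    *done = Some(true); // flip first: a failure below is logged, never re-attempted in a loop
    if let Err(e) = do_migration().await {
        eprintln!("migration failed: {e}");
    }
}

     /// Runs periodic tasks that are not directly triggered by user input.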
     pub async fn heartbeat(&mut self) {
         self.process_proposals();
@@ -4621,6 +4651,8 @@
         self.maybe_move_staked_maturity();
 
         self.maybe_gc();
+
+        self.maybe_migrate_root_wasm_memory_limit().await;
     }
 
     fn should_update_maturity_modulation(&self) -> bool {
diff --git a/rs/sns/governance/src/proposal.rs b/rs/sns/governance/src/proposal.rs
index 2653ab8ba2b..b47276b5de5 100644
--- a/rs/sns/governance/src/proposal.rs
+++ b/rs/sns/governance/src/proposal.rs
@@ -2489,6 +2489,7 @@ mod tests {
             pending_version: None,
             is_finalizing_disburse_maturity: None,
             maturity_modulation: None,
+            migrated_root_wasm_memory_limit: Some(true),
         }
     }
diff --git a/rs/sns/governance/src/types.rs b/rs/sns/governance/src/types.rs
index 1a07cfde63d..d83fedcc18c 100644
--- a/rs/sns/governance/src/types.rs
+++ b/rs/sns/governance/src/types.rs
@@ -46,9 +46,10 @@
 use ic_icrc1_ledger::UpgradeArgs as LedgerUpgradeArgs;
 use ic_ledger_core::tokens::TOKEN_SUBDIVIDABLE_BY;
 use ic_management_canister_types::CanisterInstallModeError;
 use ic_nervous_system_common::{
-    ledger_validation::MAX_LOGO_LENGTH, validate_proposal_url, NervousSystemError,
-    DEFAULT_TRANSFER_FEE, ONE_DAY_SECONDS, ONE_MONTH_SECONDS, ONE_YEAR_SECONDS,
+    ledger_validation::MAX_LOGO_LENGTH, NervousSystemError, DEFAULT_TRANSFER_FEE, ONE_DAY_SECONDS,
+    ONE_MONTH_SECONDS, ONE_YEAR_SECONDS,
 };
+use ic_nervous_system_common_validation::validate_proposal_url;
 use ic_nervous_system_proto::pb::v1::{Duration as PbDuration, Percentage};
 use ic_sns_governance_proposal_criticality::{
     ProposalCriticality, VotingDurationParameters, VotingPowerThresholds,
diff --git a/rs/sns/governance/tests/fixtures/environment_fixture.rs b/rs/sns/governance/tests/fixtures/environment_fixture.rs
index e4e0d9d08d2..ad0238cc426 100644
--- a/rs/sns/governance/tests/fixtures/environment_fixture.rs
+++ b/rs/sns/governance/tests/fixtures/environment_fixture.rs
@@ -1,6 +1,7 @@
 use async_trait::async_trait;
 use candid::{CandidType, Decode, Encode, Error};
 use ic_base_types::CanisterId;
+use ic_nervous_system_clients::update_settings::CanisterSettings;
 use ic_sns_governance::{
     pb::sns_root_types::{RegisterDappCanistersRequest, SetDappControllersRequest},
     types::{Environment, HeapGrowthPotential},
@@ -15,6 +16,7 @@ type CanisterCallResult = Result<Vec<u8>, (Option<i32>, String)>;
 pub enum CanisterCallRequest {
     RegisterDappCanisters(RegisterDappCanistersRequest),
     SetDappControllers(SetDappControllersRequest),
+    UpdateSettings(CanisterSettings),
 }
 
 #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
@@ -70,6 +72,9 @@ impl EnvironmentFixture {
             "set_dapp_controllers" => {
                 CanisterCallRequest::SetDappControllers(Decode!(&args, SetDappControllersRequest)?)
             }
+            "update_settings" => {
+                CanisterCallRequest::UpdateSettings(Decode!(&args, CanisterSettings)?)
+            }
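On the sending side, the call the fixture can now decode is built roughly like this (a sketch following update_root_canister_settings above; the root canister id is a placeholder):

use candid::Encode;
use ic_nervous_system_clients::update_settings::{CanisterSettings, UpdateSettings};
use ic_nns_constants::DEFAULT_SNS_NON_GOVERNANCE_CANISTER_WASM_MEMORY_LIMIT;

let args = UpdateSettings {
    canister_id: root_canister_id, // hypothetical PrincipalId of the SNS root
    settings: CanisterSettings {
        wasm_memory_limit: Some(candid::Nat::from(
            DEFAULT_SNS_NON_GOVERNANCE_CANISTER_WASM_MEMORY_LIMIT,
        )),
        ..Default::default()
    },
    sender_canister_version: None, // allowed to be None
};
let payload = Encode!(&args).expect("Unable to encode update_settings args.");
// env.call_canister(ic_management_canister_types::IC_00, "update_settings", payload).await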
             _ => panic!("Unsupported method_name `{method_name}` in decode_canister_call."),
         };
@@ -173,7 +178,7 @@ impl Environment for EnvironmentFixture {
                 .unwrap()
                 .mocked_canister_replies
                 .pop()
-                .expect("Expected there to be a mocked canister reply on the stack"),
+                .unwrap_or_else(|| panic!("Expected there to be a mocked canister reply on the stack for method `{method_name}`")),
         );
 
         match encode_result {
diff --git a/rs/sns/governance/tests/fixtures/mod.rs b/rs/sns/governance/tests/fixtures/mod.rs
index adc639fd04e..45888757107 100755
--- a/rs/sns/governance/tests/fixtures/mod.rs
+++ b/rs/sns/governance/tests/fixtures/mod.rs
@@ -854,6 +854,7 @@ impl Default for GovernanceCanisterFixtureBuilder {
                 current_basis_points: Some(0),
                 updated_at_timestamp_seconds: Some(1),
             }),
+            migrated_root_wasm_memory_limit: Some(true),
             ..Default::default()
         },
         sns_ledger_transforms: Vec::default(),
@@ -1074,6 +1075,11 @@ impl GovernanceCanisterFixtureBuilder {
         // Set up the canister fixture with our neuron.
         self.add_neuron(neuron)
     }
+
+    pub fn set_migrated_root_wasm_memory_limit(mut self, value: bool) -> Self {
+        self.governance.migrated_root_wasm_memory_limit = Some(value);
+        self
+    }
 }
 
 #[macro_export]
diff --git a/rs/sns/governance/tests/governance.rs b/rs/sns/governance/tests/governance.rs
index 810f1c65200..a57ee4106ea 100644
--- a/rs/sns/governance/tests/governance.rs
+++ b/rs/sns/governance/tests/governance.rs
@@ -3,6 +3,7 @@ use crate::fixtures::{
     GovernanceCanisterFixtureBuilder, NeuronBuilder, TargetLedger,
 };
 use assert_matches::assert_matches;
+use fixtures::environment_fixture::CanisterCallReply;
 use ic_base_types::{CanisterId, PrincipalId};
 use ic_nervous_system_common::{E8, ONE_DAY_SECONDS, ONE_MONTH_SECONDS};
 use ic_nervous_system_common_test_keys::{
@@ -3017,3 +3018,44 @@ fn test_deregister_dapp_has_higher_voting_thresholds() {
         Percentage::from_basis_points(2000)
     );
 }
+
+#[test]
+fn test_updates_root_settings() {
+    let mut gov = {
+        // Set up the test environment with migrated_root_wasm_memory_limit set to Some(false).
+        let gov_fixture_builder =
+            GovernanceCanisterFixtureBuilder::new().set_migrated_root_wasm_memory_limit(false);
+        let gov = gov_fixture_builder.create();
+        // The heartbeat will call the management canister to update the settings,
+        // so we need to mock the reply.
+        gov.environment_fixture
+            .environment_fixture_state
+            .lock()
+            .unwrap()
+            .mocked_canister_replies
+            .push(CanisterCallReply::Response(Vec::new()));
+        gov
+    };
+
+    gov.heartbeat();
+    assert!(gov
+        .governance
+        .proto
+        .migrated_root_wasm_memory_limit
+        .unwrap());
+}
+
+#[test]
+#[should_panic(
+    expected = "Expected there to be a mocked canister reply on the stack for method `update_settings`"
+)]
+fn test_updates_root_settings_calls_management_canister() {
+    let mut gov = {
+        // Set up the test environment with migrated_root_wasm_memory_limit set to Some(false).
+        let gov_fixture_builder =
+            GovernanceCanisterFixtureBuilder::new().set_migrated_root_wasm_memory_limit(false);
+        gov_fixture_builder.create()
+    };
+
+    gov.heartbeat(); // should panic
+}
diff --git a/rs/sns/governance/token_valuation/src/lib.rs b/rs/sns/governance/token_valuation/src/lib.rs
index bc653f359e4..54f1942dc3c 100644
--- a/rs/sns/governance/token_valuation/src/lib.rs
+++ b/rs/sns/governance/token_valuation/src/lib.rs
@@ -358,7 +358,7 @@ impl IcpsPerSnsTokenClient {
                 "Unable to determine ICPs per SNS token, because calling swap canister \
Request:\n{}\nerr: {:?}", self.swap_canister_id, - clamp_debug_len(request, /* max_len = */ 100), + clamp_debug_len(&request, /* max_len = */ 100), err, )) }) diff --git a/rs/sns/init/BUILD.bazel b/rs/sns/init/BUILD.bazel index 80f5b697f39..3bbc528ef9e 100644 --- a/rs/sns/init/BUILD.bazel +++ b/rs/sns/init/BUILD.bazel @@ -33,12 +33,14 @@ BASE_DEPENDENCIES = [ # Each target declared in this file may choose either these (release-ready) # dependencies (`DEPENDENCIES`), or `DEPENDENCIES_WITH_TEST_FEATURES` feature previews. DEPENDENCIES = BASE_DEPENDENCIES + [ + "//rs/nns/governance/api", "//rs/nns/constants", "//rs/sns/governance", "//rs/sns/swap", ] DEPENDENCIES_WITH_TEST_FEATURES = BASE_DEPENDENCIES + [ + "//rs/nns/governance/api:api--test_feature", "//rs/nns/constants:constants--test_feature", "//rs/sns/governance:governance--test_feature", "//rs/sns/swap:swap--test_feature", diff --git a/rs/sns/init/Cargo.toml b/rs/sns/init/Cargo.toml index 75fed93a005..dae0d74999e 100644 --- a/rs/sns/init/Cargo.toml +++ b/rs/sns/init/Cargo.toml @@ -20,6 +20,7 @@ ic-ledger-core = { path = "../../rosetta-api/ledger_core" } ic-nervous-system-common = { path = "../../nervous_system/common" } ic-nervous-system-proto = { path = "../../nervous_system/proto" } ic-nns-constants = { path = "../../nns/constants" } +ic-nns-governance-api = { path = "../../nns/governance/api" } ic-sns-governance = { path = "../governance" } ic-sns-root = { path = "../root" } ic-sns-swap = { path = "../swap" } diff --git a/rs/sns/init/src/create_service_nervous_system.rs b/rs/sns/init/src/create_service_nervous_system.rs new file mode 100644 index 00000000000..441d2527c51 --- /dev/null +++ b/rs/sns/init/src/create_service_nervous_system.rs @@ -0,0 +1,459 @@ +use crate::pb::v1::{ + sns_init_payload, AirdropDistribution, DappCanisters, DeveloperDistribution, + FractionalDeveloperVotingPower, NeuronDistribution, SnsInitPayload, SwapDistribution, + TreasuryDistribution, +}; +use ic_nns_governance_api::pb::v1::{create_service_nervous_system, CreateServiceNervousSystem}; + +fn divide_perfectly(field_name: &str, dividend: u64, divisor: u64) -> Result { + match dividend.checked_rem(divisor) { + None => Err(format!( + "Attempted to divide by zero while validating {}. \ + (This is likely due to an internal bug.)", + field_name, + )), + + Some(0) => Ok(dividend.saturating_div(divisor)), + + Some(remainder) => { + assert_ne!(remainder, 0); + Err(format!( + "{} is supposed to contain a value that is evenly divisible by {}, \ + but it contains {}, which leaves a remainder of {}.", + field_name, divisor, dividend, remainder, + )) + } + } +} + +// CreateServiceNervousSystem - should be internal type? 
+impl TryFrom<CreateServiceNervousSystem> for SnsInitPayload {
+    type Error = String;
+
+    // This validation should just be put into a separate function
+    fn try_from(src: CreateServiceNervousSystem) -> Result<Self, Self::Error> {
+        let CreateServiceNervousSystem {
+            name,
+            description,
+            url,
+            logo,
+            fallback_controller_principal_ids,
+            dapp_canisters,
+
+            initial_token_distribution,
+
+            swap_parameters,
+            ledger_parameters,
+            governance_parameters,
+        } = src;
+
+        let mut defects = vec![];
+
+        let ledger_parameters = ledger_parameters.unwrap_or_default();
+        let governance_parameters = governance_parameters.unwrap_or_default();
+        let swap_parameters = swap_parameters.unwrap_or_default();
+
+        let create_service_nervous_system::LedgerParameters {
+            transaction_fee,
+            token_name,
+            token_symbol,
+            token_logo,
+        } = ledger_parameters;
+
+        let transaction_fee_e8s = transaction_fee.and_then(|tokens| tokens.e8s);
+
+        let token_logo = token_logo.and_then(|image| image.base64_encoding);
+
+        let proposal_reject_cost_e8s = governance_parameters
+            .proposal_rejection_fee
+            .and_then(|tokens| tokens.e8s);
+
+        let neuron_minimum_stake_e8s = governance_parameters
+            .neuron_minimum_stake
+            .and_then(|tokens| tokens.e8s);
+
+        let initial_token_distribution = match sns_init_payload::InitialTokenDistribution::try_from(
+            initial_token_distribution.unwrap_or_default(),
+        ) {
+            Ok(ok) => Some(ok),
+            Err(err) => {
+                defects.push(err);
+                None
+            }
+        };
+
+        let fallback_controller_principal_ids = fallback_controller_principal_ids
+            .iter()
+            .map(|principal_id| principal_id.to_string())
+            .collect();
+
+        let logo = logo.and_then(|image| image.base64_encoding);
+        // url, name, and description need no conversion.
+
+        let neuron_minimum_dissolve_delay_to_vote_seconds = governance_parameters
+            .neuron_minimum_dissolve_delay_to_vote
+            .and_then(|duration| duration.seconds);
+
+        let voting_reward_parameters = governance_parameters
+            .voting_reward_parameters
+            .unwrap_or_default();
+
+        let initial_reward_rate_basis_points = voting_reward_parameters
+            .initial_reward_rate
+            .and_then(|percentage| percentage.basis_points);
+        let final_reward_rate_basis_points = voting_reward_parameters
+            .final_reward_rate
+            .and_then(|percentage| percentage.basis_points);
+
+        let reward_rate_transition_duration_seconds = voting_reward_parameters
+            .reward_rate_transition_duration
+            .and_then(|duration| duration.seconds);
+
+        let max_dissolve_delay_seconds = governance_parameters
+            .neuron_maximum_dissolve_delay
+            .and_then(|duration| duration.seconds);
+
+        let max_neuron_age_seconds_for_age_bonus = governance_parameters
+            .neuron_maximum_age_for_age_bonus
+            .and_then(|duration| duration.seconds);
+
+        let mut basis_points_to_percentage =
+            |field_name, percentage: ic_nervous_system_proto::pb::v1::Percentage| -> u64 {
+                let basis_points = percentage.basis_points.unwrap_or_default();
+                match divide_perfectly(field_name, basis_points, 100) {
+                    Ok(ok) => ok,
+                    Err(err) => {
+                        defects.push(err);
+                        basis_points.saturating_div(100)
+                    }
+                }
+            };
+
+        let max_dissolve_delay_bonus_percentage = governance_parameters
+            .neuron_maximum_dissolve_delay_bonus
+            .map(|percentage| {
+                basis_points_to_percentage(
+                    "governance_parameters.neuron_maximum_dissolve_delay_bonus",
+                    percentage,
+                )
+            });
+
+        let max_age_bonus_percentage =
+            governance_parameters
+                .neuron_maximum_age_bonus
+                .map(|percentage| {
+                    basis_points_to_percentage(
+                        "governance_parameters.neuron_maximum_age_bonus",
+                        percentage,
+                    )
+                });
+
+        let initial_voting_period_seconds = governance_parameters
+            .proposal_initial_voting_period
+ .and_then(|duration| duration.seconds); + + let wait_for_quiet_deadline_increase_seconds = governance_parameters + .proposal_wait_for_quiet_deadline_increase + .and_then(|duration| duration.seconds); + + let dapp_canisters = Some(DappCanisters { + canisters: dapp_canisters, + }); + + let confirmation_text = swap_parameters.confirmation_text; + + let restricted_countries = swap_parameters.restricted_countries; + + let min_participants = swap_parameters.minimum_participants; + + let min_direct_participation_icp_e8s = swap_parameters + .minimum_direct_participation_icp + .and_then(|tokens| tokens.e8s); + + let max_direct_participation_icp_e8s = swap_parameters + .maximum_direct_participation_icp + .and_then(|tokens| tokens.e8s); + + // Check if the deprecated fields are set. + if let Some(neurons_fund_investment_icp) = swap_parameters.neurons_fund_investment_icp { + defects.push(format!( + "neurons_fund_investment_icp ({:?}) is deprecated; please set \ + neurons_fund_participation instead.", + neurons_fund_investment_icp, + )); + } + if let Some(minimum_icp) = swap_parameters.minimum_icp { + defects.push(format!( + "minimum_icp ({:?}) is deprecated; please set \ + min_direct_participation_icp_e8s instead.", + minimum_icp, + )); + }; + if let Some(maximum_icp) = swap_parameters.maximum_icp { + defects.push(format!( + "maximum_icp ({:?}) is deprecated; please set \ + max_direct_participation_icp_e8s instead.", + maximum_icp, + )); + }; + + let neurons_fund_participation = swap_parameters.neurons_fund_participation; + + let min_participant_icp_e8s = swap_parameters + .minimum_participant_icp + .and_then(|tokens| tokens.e8s); + + let max_participant_icp_e8s = swap_parameters + .maximum_participant_icp + .and_then(|tokens| tokens.e8s); + + let neuron_basket_construction_parameters = swap_parameters + .neuron_basket_construction_parameters + .map( + |basket| ic_sns_swap::pb::v1::NeuronBasketConstructionParameters { + count: basket.count.unwrap_or_default(), + dissolve_delay_interval_seconds: basket + .dissolve_delay_interval + .map(|duration| duration.seconds.unwrap_or_default()) + .unwrap_or_default(), + }, + ); + + if !defects.is_empty() { + return Err(format!( + "Failed to convert CreateServiceNervousSystem proposal to SnsInitPayload:\n{}", + defects.join("\n"), + )); + } + + let result = Self { + transaction_fee_e8s, + token_name, + token_symbol, + proposal_reject_cost_e8s, + neuron_minimum_stake_e8s, + initial_token_distribution, + fallback_controller_principal_ids, + logo, + url, + name, + description, + neuron_minimum_dissolve_delay_to_vote_seconds, + initial_reward_rate_basis_points, + final_reward_rate_basis_points, + reward_rate_transition_duration_seconds, + max_dissolve_delay_seconds, + max_neuron_age_seconds_for_age_bonus, + max_dissolve_delay_bonus_percentage, + max_age_bonus_percentage, + initial_voting_period_seconds, + wait_for_quiet_deadline_increase_seconds, + dapp_canisters, + min_participants, + min_direct_participation_icp_e8s, + max_direct_participation_icp_e8s, + min_participant_icp_e8s, + max_participant_icp_e8s, + neuron_basket_construction_parameters, + confirmation_text, + restricted_countries, + token_logo, + neurons_fund_participation, + + // These are not known from only the CreateServiceNervousSystem + // proposal. See `Governance::make_sns_init_payload`. + nns_proposal_id: None, + swap_start_timestamp_seconds: None, + swap_due_timestamp_seconds: None, + neurons_fund_participation_constraints: None, + + // Deprecated fields should be set to `None`. 
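One idiom used throughout this conversion is flattening nested optional protobuf fields with Option::and_then; a hypothetical miniature:

struct Tokens { e8s: Option<u64> }
struct Params { transaction_fee: Option<Tokens> }

fn fee_e8s(params: Params) -> Option<u64> {
    // None if either the outer field or the inner value is missing.
    params.transaction_fee.and_then(|t| t.e8s)
}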
+            min_icp_e8s: None,
+            max_icp_e8s: None,
+        };
+
+        result.validate_pre_execution()?;
+
+        Ok(result)
+    }
+}
+
+impl TryFrom<create_service_nervous_system::InitialTokenDistribution>
+    for sns_init_payload::InitialTokenDistribution
+{
+    type Error = String;
+
+    fn try_from(
+        src: create_service_nervous_system::InitialTokenDistribution,
+    ) -> Result<Self, Self::Error> {
+        let create_service_nervous_system::InitialTokenDistribution {
+            developer_distribution,
+            treasury_distribution,
+            swap_distribution,
+        } = src;
+
+        let mut defects = vec![];
+
+        let developer_distribution =
+            match DeveloperDistribution::try_from(developer_distribution.unwrap_or_default()) {
+                Ok(ok) => Some(ok),
+                Err(err) => {
+                    defects.push(err);
+                    None
+                }
+            };
+
+        let treasury_distribution =
+            match TreasuryDistribution::try_from(treasury_distribution.unwrap_or_default()) {
+                Ok(ok) => Some(ok),
+                Err(err) => {
+                    defects.push(err);
+                    None
+                }
+            };
+
+        let swap_distribution =
+            match SwapDistribution::try_from(swap_distribution.unwrap_or_default()) {
+                Ok(ok) => Some(ok),
+                Err(err) => {
+                    defects.push(err);
+                    None
+                }
+            };
+
+        let airdrop_distribution = Some(AirdropDistribution::default());
+
+        if !defects.is_empty() {
+            return Err(format!(
+                "Failed to convert to InitialTokenDistribution for the following reasons:\n{}",
+                defects.join("\n"),
+            ));
+        }
+
+        Ok(Self::FractionalDeveloperVotingPower(
+            FractionalDeveloperVotingPower {
+                developer_distribution,
+                treasury_distribution,
+                swap_distribution,
+                airdrop_distribution,
+            },
+        ))
+    }
+}
+
+impl TryFrom<create_service_nervous_system::initial_token_distribution::SwapDistribution>
+    for SwapDistribution
+{
+    type Error = String;
+
+    fn try_from(
+        src: create_service_nervous_system::initial_token_distribution::SwapDistribution,
+    ) -> Result<Self, Self::Error> {
+        let create_service_nervous_system::initial_token_distribution::SwapDistribution { total } =
+            src;
+
+        let total_e8s = total.unwrap_or_default().e8s.unwrap_or_default();
+        let initial_swap_amount_e8s = total_e8s;
+
+        Ok(Self {
+            initial_swap_amount_e8s,
+            total_e8s,
+        })
+    }
+}
+
+impl TryFrom<create_service_nervous_system::initial_token_distribution::TreasuryDistribution>
+    for TreasuryDistribution
+{
+    type Error = String;
+
+    fn try_from(
+        src: create_service_nervous_system::initial_token_distribution::TreasuryDistribution,
+    ) -> Result<Self, Self::Error> {
+        let create_service_nervous_system::initial_token_distribution::TreasuryDistribution {
+            total,
+        } = src;
+
+        let total_e8s = total.unwrap_or_default().e8s.unwrap_or_default();
+
+        Ok(Self { total_e8s })
+    }
+}
+
+impl TryFrom<create_service_nervous_system::initial_token_distribution::DeveloperDistribution>
+    for DeveloperDistribution
+{
+    type Error = String;
+
+    fn try_from(
+        src: create_service_nervous_system::initial_token_distribution::DeveloperDistribution,
+    ) -> Result<Self, Self::Error> {
+        let create_service_nervous_system::initial_token_distribution::DeveloperDistribution {
+            developer_neurons,
+        } = src;
+
+        let mut defects = vec![];
+
+        let developer_neurons = developer_neurons
+            .into_iter()
+            .enumerate()
+            .filter_map(|(i, neuron_distribution)| {
+                match NeuronDistribution::try_from(neuron_distribution) {
+                    Ok(ok) => Some(ok),
+                    Err(err) => {
+                        defects.push(format!(
+                            "Failed to convert element at index {} in field \
+                             `developer_neurons`: {}",
+                            i, err,
+                        ));
+                        None
+                    }
+                }
+            })
+            .collect();
+
+        if !defects.is_empty() {
+            return Err(format!(
+                "Failed to convert to DeveloperDistribution for SnsInitPayload: {}",
+                defects.join("\n"),
+            ));
+        }
+
+        Ok(Self { developer_neurons })
+    }
+}
+
+impl TryFrom<create_service_nervous_system::initial_token_distribution::developer_distribution::NeuronDistribution>
+for NeuronDistribution
+{
+    type Error = String;
+
+    fn try_from(
+        src: create_service_nervous_system::initial_token_distribution::developer_distribution::NeuronDistribution,
+    ) -> Result<Self, Self::Error> {
+        let
create_service_nervous_system::initial_token_distribution::developer_distribution::NeuronDistribution { + controller, + dissolve_delay, + memo, + stake, + vesting_period, + } = src; + + // controller needs no conversion + let stake_e8s = stake.unwrap_or_default().e8s.unwrap_or_default(); + let memo = memo.unwrap_or_default(); + let dissolve_delay_seconds = dissolve_delay + .unwrap_or_default() + .seconds + .unwrap_or_default(); + let vesting_period_seconds = vesting_period.unwrap_or_default().seconds; + + Ok(Self { + controller, + stake_e8s, + memo, + dissolve_delay_seconds, + vesting_period_seconds, + }) + } +} diff --git a/rs/sns/init/src/lib.rs b/rs/sns/init/src/lib.rs index f47b0679a59..51bf7c554f5 100644 --- a/rs/sns/init/src/lib.rs +++ b/rs/sns/init/src/lib.rs @@ -44,6 +44,7 @@ use std::{ string::ToString, }; +mod create_service_nervous_system; pub mod distributions; pub mod pb; diff --git a/rs/sns/integration_tests/BUILD.bazel b/rs/sns/integration_tests/BUILD.bazel index ecb7266963f..d62a4292e3c 100644 --- a/rs/sns/integration_tests/BUILD.bazel +++ b/rs/sns/integration_tests/BUILD.bazel @@ -85,7 +85,7 @@ BASE_DEV_DEPENDENCIES = [ # dependencies (`DEV_DEPENDENCIES`), or `TEST_DEV_DEPENDENCIES` feature previews. # (Currently not used) # DEV_DEPENDENCIES = BASE_DEV_DEPENDENCIES + [ -# "//rs/nns/governance", +# "//rs/nns/governance/api", # "//rs/nns/sns-wasm", # "//rs/nns/test_utils", # "//rs/sns/governance", diff --git a/rs/tests/networking/canisters/src/cloner_canister.rs b/rs/tests/networking/canisters/src/cloner_canister.rs index 3b68d151be4..cf1e20db6eb 100644 --- a/rs/tests/networking/canisters/src/cloner_canister.rs +++ b/rs/tests/networking/canisters/src/cloner_canister.rs @@ -26,10 +26,6 @@ thread_local! { static CANISTER_IDS: RefCell> = const { RefCell::new(BTreeSet::new()) }; } -fn store_canister_id(canister_id: CanisterId) { - CANISTER_IDS.with(|canister_ids| canister_ids.borrow_mut().insert(canister_id)); -} - async fn spinup_canister(wasm_module: Vec) -> CallResult<()> { // Create canister. let canister_id = create_canister( @@ -49,7 +45,7 @@ async fn spinup_canister(wasm_module: Vec) -> CallResult<()> { .canister_id; // Store canister id. - store_canister_id(canister_id); + CANISTER_IDS.with(|canister_ids| canister_ids.borrow_mut().insert(canister_id)); // Install code if provided. 
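The distribution conversions above all collapse absent optional values to zero via chained `unwrap_or_default` calls. A standalone sketch of that behavior, using simplified stand-in types rather than the real prost-generated ones:

// Simplified stand-ins for the prost-generated optional-wrapper types.
#[derive(Default)]
struct Tokens {
    e8s: Option<u64>,
}

#[derive(Default)]
struct TreasuryDistribution {
    total: Option<Tokens>,
}

fn total_e8s(src: TreasuryDistribution) -> u64 {
    // A missing `total` or a missing `e8s` both become 0, mirroring the
    // `unwrap_or_default` chains in the diff.
    src.total.unwrap_or_default().e8s.unwrap_or_default()
}

fn main() {
    assert_eq!(total_e8s(TreasuryDistribution::default()), 0);
    assert_eq!(
        total_e8s(TreasuryDistribution {
            total: Some(Tokens { e8s: Some(42) })
        }),
        42
    );
}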
diff --git a/rs/sns/init/src/lib.rs b/rs/sns/init/src/lib.rs
index f47b0679a59..51bf7c554f5 100644
--- a/rs/sns/init/src/lib.rs
+++ b/rs/sns/init/src/lib.rs
@@ -44,6 +44,7 @@ use std::{
     string::ToString,
 };
 
+mod create_service_nervous_system;
 pub mod distributions;
 pub mod pb;
 
diff --git a/rs/sns/integration_tests/BUILD.bazel b/rs/sns/integration_tests/BUILD.bazel
index ecb7266963f..d62a4292e3c 100644
--- a/rs/sns/integration_tests/BUILD.bazel
+++ b/rs/sns/integration_tests/BUILD.bazel
@@ -85,7 +85,7 @@ BASE_DEV_DEPENDENCIES = [
 # dependencies (`DEV_DEPENDENCIES`), or `TEST_DEV_DEPENDENCIES` feature previews.
 # (Currently not used)
 # DEV_DEPENDENCIES = BASE_DEV_DEPENDENCIES + [
-#     "//rs/nns/governance",
+#     "//rs/nns/governance/api",
 #     "//rs/nns/sns-wasm",
 #     "//rs/nns/test_utils",
 #     "//rs/sns/governance",
diff --git a/rs/tests/networking/canisters/src/cloner_canister.rs b/rs/tests/networking/canisters/src/cloner_canister.rs
index 3b68d151be4..cf1e20db6eb 100644
--- a/rs/tests/networking/canisters/src/cloner_canister.rs
+++ b/rs/tests/networking/canisters/src/cloner_canister.rs
@@ -26,10 +26,6 @@ thread_local! {
     static CANISTER_IDS: RefCell<BTreeSet<CanisterId>> = const { RefCell::new(BTreeSet::new()) };
 }
 
-fn store_canister_id(canister_id: CanisterId) {
-    CANISTER_IDS.with(|canister_ids| canister_ids.borrow_mut().insert(canister_id));
-}
-
 async fn spinup_canister(wasm_module: Vec<u8>) -> CallResult<()> {
     // Create canister.
     let canister_id = create_canister(
@@ -49,7 +45,7 @@ async fn spinup_canister(wasm_module: Vec<u8>) -> CallResult<()> {
         .canister_id;
 
     // Store canister id.
-    store_canister_id(canister_id);
+    CANISTER_IDS.with(|canister_ids| canister_ids.borrow_mut().insert(canister_id));
 
     // Install code if provided.
     let is_wasm_module_empty = wasm_module.is_empty();
diff --git a/rs/tests/src/rosetta_tests/governance_client.rs b/rs/tests/src/rosetta_tests/governance_client.rs
index 931cbc185f3..988ea68bdaf 100644
--- a/rs/tests/src/rosetta_tests/governance_client.rs
+++ b/rs/tests/src/rosetta_tests/governance_client.rs
@@ -8,7 +8,7 @@ use ic_nns_governance_api::{
         manage_neuron_response, manage_neuron_response::MakeProposalResponse, MakeProposalRequest,
         ManageNeuronResponse, ProposalInfo,
     },
-    proposal_helpers::create_make_proposal_payload,
+    proposal_submission_helpers::create_make_proposal_payload,
 };
 use ic_system_test_driver::{
     driver::{
diff --git a/testnet/mainnet_revisions.json b/testnet/mainnet_revisions.json
index ba3df772e44..eb34c4fd547 100644
--- a/testnet/mainnet_revisions.json
+++ b/testnet/mainnet_revisions.json
@@ -1,6 +1,6 @@
 {
   "subnets": {
     "tdb26-jop6k-aogll-7ltgs-eruif-6kk7m-qpktf-gdiqx-mxtrf-vb5e6-eqe": "5d1beaca74d0bf2ad2e8a37809e1e3ffa5ee9a32",
-    "io67a-2jmkw-zup3h-snbwi-g6a5n-rm5dn-b6png-lvdpl-nqnto-yih6l-gqe": "c180069c95739773293599d90c23defc12c3084f"
+    "io67a-2jmkw-zup3h-snbwi-g6a5n-rm5dn-b6png-lvdpl-nqnto-yih6l-gqe": "cacf86a2ea7e21150d106e9e4dda5973088f53c5"
   }
 }
\ No newline at end of file
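For context on the cloner_canister.rs change above: the one-line `store_canister_id` wrapper is removed and the `thread_local!` insertion is inlined at its only call site. A minimal sketch of the same pattern outside a canister, with `u64` as a hypothetical stand-in for `CanisterId`:

use std::cell::RefCell;
use std::collections::BTreeSet;

thread_local! {
    // Same shape as the canister's CANISTER_IDS set; u64 stands in for
    // CanisterId here.
    static CANISTER_IDS: RefCell<BTreeSet<u64>> = const { RefCell::new(BTreeSet::new()) };
}

fn main() {
    // The insertion happens inline at the call site, as the diff now does,
    // instead of going through a single-use wrapper function.
    CANISTER_IDS.with(|canister_ids| canister_ids.borrow_mut().insert(7));
    assert_eq!(CANISTER_IDS.with(|ids| ids.borrow().len()), 1);
}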