diff --git a/.gitmodules b/.gitmodules index ddde5a01..cf474ae4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,9 +1,12 @@ [submodule "riscv-linux"] - path = riscv-linux + path = boards/default/linux url = https://github.com/firesim/linux.git [submodule "riscv-pk"] - path = riscv-pk - url = https://github.com/firesim/riscv-pk.git + path = boards/default/firmware/riscv-pk + url = https://github.com/riscv/riscv-pk.git +[submodule "opensbi"] + path = boards/default/firmware/opensbi + url = https://github.com/riscv/opensbi.git [submodule "br/buildroot"] path = wlutil/br/buildroot url = https://github.com/buildroot/buildroot.git diff --git a/CHANGELOG.md b/CHANGELOG.md index ff58411c..2f636a0f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,56 @@ This changelog follows the format defined here: https://keepachangelog.com/en/1.0.0/ Versioning follows semantic versioning as described here: https://semver.org/spec/v2.0.0.html -## [1.9.0] - 2021-05-21 +## [1.10.0] - 2020-10-05 +The biggest change in this release is the introduction of OpenSBI as the +default firmware. BBL is still supported, but no longer the default. Other +changes include a number of performance improvements, better support for +user-provided kernel modules, and various bug fixes. + +### Added +* PR #165 adds the ability to include user-provided kernel modules. These are + automatically built and loaded, as opposed to the 'post-bin' option, which is + more flexible but can't load modules during early boot and is more work. This also + unifies all linux-related options into their own 'linux' option. The old + 'linux-src' and 'linux-config' options are still supported but deprecated; users + should now specify those in the 'linux' option. +* Firmware Improvements, OpenSBI support + * PR #152 adds support for OpenSBI as the default firmware. BBL is still + supported, but is no longer the default (use the "use-bbl" option). + * PR #172 adds 'opensbi-build-args' and 'bbl-build-args' options. It also + introduces the new 'firmware' option group (deprecating the 'pk-src' + option and grouping all firmware-relevant options into one place). + +### Changed +* PR #156 patches the default kernel to enable RoCC instructions by default. + This is a common need and has minimal risk to non-RoCC platforms. +* Performance Improvements + * PR #159 moves submodule status checking to only run for the workload + being built (rather than every workload in the workload search path). + Before this, if you had many workloads in your project, marshal could spend a + lot of time checking the same modules over and over. + * PR #166 copies the parent's binary instead of rebuilding for every child + if none of the binary-related options changed. This is a big performance + improvement for applications with many child workloads. +* PR #162 overhauls the unit testing framework. Previous versions were hard to + maintain and had various bugs that could mask failures from time to time. + There is now a standard way to write complex tests and all unit tests were + confirmed to pass. + +### Fixed +* Better support for files generated by host-init + * PR #158 correctly detects changes in files/overlay generated in host-init + (bug #157). + * PR #167 handles kernel modules generated in host-init. +* PR #160 broadens the scope of up-to-date criteria to include file metadata.
+ For example, adding execute permissions to a file in files/overlay previously + failed to trigger a rebuild (bug #145). +* PR #162 ignores symlinks in overlays, which often won't work if the symlink + points to files in the rootfs rather than the overlay. +* PR #164 fixes bug #163. A sufficiently large initramfs could overwrite kernel + memory. This tended to break Fedora --no-disk builds. + +## [1.9.0] - 2020-05-21 This is largely a maintenance release with a few minor features and a bunch of bug fixes. The most significant change is a bump to Linux 5.7rc3. The new 'firesim-dir' configuration option is also signficant because it enables more diff --git a/README.md b/README.md index cd9633fd..7f7bd0cc 100644 --- a/README.md +++ b/README.md @@ -36,11 +36,17 @@ pip3 install -r python-requirements.txt ## RISC-V Tools In addition to standard libraries, you will need a RISC-V compatible toolchain, -the RISC-V isa simulator (spike), and Qemu. +the RISC-V isa simulator (spike). See the [Chipyard documentation](https://chipyard.readthedocs.io/en/latest/Chipyard-Basics/Initial-Repo-Setup.html#building-a-toolchain) for help setting up a known-good toolchain and environment. +## Qemu +Qemu is the default simulator used by FireMarshal. We require version v5.0.0 or +greater. If you aren't using Chipyard, you can get it from: + +https://github.com/qemu/qemu/tree/v5.0.0 + # Basic Usage If you only want to build bare-metal workloads, you can skip updating submodules. Otherwise, you should update the required submodules by running: diff --git a/boards/default/README.md b/boards/default/README.md new file mode 100644 index 00000000..63525c6a --- /dev/null +++ b/boards/default/README.md @@ -0,0 +1,3 @@ +This board contains reasonable defaults for a generic system. You probably +don't want to use it directly; other boards typically symlink defaults from +here if they don't need to change anything.
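As a quick illustration of the new 'linux' and 'firmware' option groups described in the changelog above (and documented in workloadConfig.rst later in this patch), a workload could opt into them roughly as follows. This is a minimal sketch and is not part of the patch itself; the workload name, kernel-config fragment, and module path are hypothetical, and only the option keys come from the documentation added here.

# Hypothetical workload using the 1.10.0 'linux' and 'firmware' option groups.
# File and module names below are placeholders; only the keys come from the new docs.
cat > example.json <<'EOF'
{
  "name" : "example",
  "base" : "br-base.json",
  "linux" : {
    "config" : "my-linux.kfrag",
    "modules" : { "mymod" : "mymod-src/" }
  },
  "firmware" : { "use-bbl" : true }
}
EOF
./marshal build example.json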
diff --git a/boards/default/firmware/opensbi b/boards/default/firmware/opensbi new file mode 160000 index 00000000..a98258d0 --- /dev/null +++ b/boards/default/firmware/opensbi @@ -0,0 +1 @@ +Subproject commit a98258d0b537a295f517bbc8d813007336731fa9 diff --git a/boards/default/firmware/riscv-pk b/boards/default/firmware/riscv-pk new file mode 160000 index 00000000..5d9ed238 --- /dev/null +++ b/boards/default/firmware/riscv-pk @@ -0,0 +1 @@ +Subproject commit 5d9ed238e1cabfbca3c47f50d32894ce94bfc304 diff --git a/riscv-linux b/boards/default/linux similarity index 100% rename from riscv-linux rename to boards/default/linux diff --git a/boards/firechip/base-workloads/br-base.json b/boards/firechip/base-workloads/br-base.json index 31d9b4c3..b20e6c5e 100644 --- a/boards/firechip/base-workloads/br-base.json +++ b/boards/firechip/base-workloads/br-base.json @@ -2,7 +2,19 @@ "name" : "br-base", "base" : "br", "overlay" : "overlay", - "linux-config" : "linux-config", + "linux" : { + "source" : "../../linux", + "config" : "linux-config", + "modules" : { + "icenet" : "../../drivers/icenet-driver", + "iceblk" : "../../drivers/iceblk-driver" + } + }, + "firmware" : { + "use-bbl" : false, + "bbl-src" : "../../firmware/riscv-pk", + "opensbi-src" : "../../firmware/opensbi" + }, "host-init" : "host-init.sh", "files" : [ [ "trigger/start", "/usr/bin/firesim-start-trigger"], diff --git a/boards/firechip/base-workloads/fedora-base.json b/boards/firechip/base-workloads/fedora-base.json index 07f60a12..be66a6ec 100644 --- a/boards/firechip/base-workloads/fedora-base.json +++ b/boards/firechip/base-workloads/fedora-base.json @@ -2,5 +2,17 @@ "name" : "fedora-base", "base" : "fedora", "overlay" : "overlay", - "linux-config" : "linux-config" + "linux" : { + "source" : "../../linux", + "config" : "linux-config", + "modules" : { + "icenet" : "../../drivers/icenet-driver", + "iceblk" : "../../drivers/iceblk-driver" + } + }, + "firmware" : { + "use-bbl" : false, + "bbl-src" : "../../firmware/riscv-pk", + "opensbi-src" : "../../firmware/opensbi" + } } diff --git a/boards/firechip/firmware b/boards/firechip/firmware new file mode 120000 index 00000000..7913b9bd --- /dev/null +++ b/boards/firechip/firmware @@ -0,0 +1 @@ +../default/firmware/ \ No newline at end of file diff --git a/boards/firechip/linux b/boards/firechip/linux new file mode 120000 index 00000000..e790db21 --- /dev/null +++ b/boards/firechip/linux @@ -0,0 +1 @@ +../default/linux/ \ No newline at end of file diff --git a/docs/source/workloadConfig.rst b/docs/source/workloadConfig.rst index b7ad3f56..6623fb00 100644 --- a/docs/source/workloadConfig.rst +++ b/docs/source/workloadConfig.rst @@ -71,14 +71,25 @@ instructions or hardware models. Defaults to the version of spike on your PATH. .. _workload-linux-src: -linux-src -^^^^^^^^^^^^^^^^ -Path to riscv-linux source directory to use when building the boot-binary for -this workload. Defaults to the riscv-linux source submoduled at -``riscv-linux/``. +linux-src (Deprecated) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +See the 'linux/source' option for the new equivalent. -linux-config -^^^^^^^^^^^^^^^^ +linux-config (Deprecated) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +See the 'linux/config' option for the new equivalent. + +linux +^^^^^^^^^^^^^^^^^ +Specification for linux customization. + +source ++++++++++++++++++ +Path to linux source directory to use when building the boot-binary for +this workload. Will default to your board's linux source. + +config +++++++++++++++++++ Linux configuration fragment to use. 
This file has the same format as linux configuration files but only contains the options required by this workload. Marshal will include a few options on top of the RISC-V default configuration, @@ -91,10 +102,56 @@ platform drivers. Inheritance happens by merging the current workload's linux-config with all parent configs, with more recent options taking precedence of earlier options. -pk-src -^^^^^^^^^^^^^^ +modules ++++++++++++++++++++ +A dictionary of the form {MODULE_NAME : PATH_TO_MODULE_SRC, ...} + +Kernel modules to compile and load automatically in your workload. Modules will +be loaded in the initramfs before loading the main user root. Modules are +identified by the .ko file name (the name listed by lsmod). Workload-defined +modules with the same name as an inherited module will take precedence, +allowing you to override system default drivers. If a module should not be +loaded automatically, you should compile it manually in a +post-bin script instead. + +The specified module directory must contain a Makefile that can be invoked as: + + make LINUXSRC=PATH/TO/LINUX + +The Makefile must output one or more .ko files to be loaded. See test/kmods/ for an example. + +pk-src (Deprecated) +^^^^^^^^^^^^^^^^^^^^ +See the 'firmware/bbl-src' option for the new equivalent. + +firmware +^^^^^^^^^^^^^^^^^^^ +Firmware-related options. + +opensbi-src ++++++++++++++++++++++ +Path to an alternative opensbi source directory. Defaults to the board-provided +opensbi. + +opensbi-build-args ++++++++++++++++++++++ +Additional command-line arguments to the 'make' command used to build opensbi. + +bbl-src +++++++++++++++++++ Path to riscv-pk source directory to use for this workload. This provides the -bootloader (bbl). Defaults to the riscv-pk submodule included at ``riscv-pk/``. +bootloader (bbl). Defaults to the riscv-pk source included with the board. + +bbl-build-args +++++++++++++++++++ +Additional arguments to be passed to the configure script for bbl. Arguments +will be appended to any system defaults. + +use-bbl +++++++++++++++++++ +Use BBL as the firmware rather than the default OpenSBI. If you specified +'bbl-src', that will be used; otherwise the default implementation from the +board will be used. host-init ^^^^^^^^^^^^^^ @@ -289,7 +346,7 @@ the following options (only ``refDir`` is required): *Non-heritable* refDir -^^^^^^^^^^^^^^^ +++++++++++++++++ Path to a directory containing reference outputs for this workload. Directory structures are compared directly (same folders, same file names). Regular files are compared exactly. Serial outputs (uartlog) need only match a subset of @@ -297,7 +354,7 @@ outputs; the entire reference uartlog contents must exist somewhere (contiguously) in the test uartlog. buildTimeout -^^^^^^^^^^^^^^^^^^^ ++++++++++++++++++++++ Maximum time (in seconds) that the workload should take to build. The test will fail if building takes longer than this. Defaults to infinite. @@ -305,13 +362,13 @@ fail if building takes longer than this. long time to build. runTimeout -^^^^^^^^^^^^^^^^^^ ++++++++++++++++++ Maximum time (in seconds) that any particular job should take to run and exit. The test will fail if a job runs for longer than this before exiting. Defaults to infinite. strip -^^^^^^^^^^^^^^^^ ++++++++++++++++ Attempt to clean up the uartlog output before comparing against the reference.
This will remove all lines not generated by a run script or command, as well as stripping out any extra characters that might be added by the run-system (e.g. diff --git a/init-submodules.sh b/init-submodules.sh index 81b3ff91..d4524114 100755 --- a/init-submodules.sh +++ b/init-submodules.sh @@ -6,8 +6,9 @@ # You do not need to call this script if you only intend to build bare-metal workloads. git submodule update --init \ - riscv-linux \ - riscv-pk \ + boards/default/linux \ + boards/default/firmware/riscv-pk \ + boards/default/firmware/opensbi \ wlutil/busybox \ wlutil/br/buildroot \ boards/firechip/drivers/* diff --git a/marshal b/marshal index 04c9200a..b2651244 100755 --- a/marshal +++ b/marshal @@ -75,6 +75,8 @@ def main(): ctx = wlutil.getCtx() + wlutil.initLogging(args.verbose) + # Load all the configs from the workload directories # Order matters here, duplicate workload files found in later search paths # will overwrite files found in earlier search paths. diff --git a/riscv-pk b/riscv-pk deleted file mode 160000 index cad3deb3..00000000 --- a/riscv-pk +++ /dev/null @@ -1 +0,0 @@ -Subproject commit cad3deb357d25773a22e2c346ef464d3d66dd37c diff --git a/scripts/.gitignore b/scripts/.gitignore index fcf6a324..8b0a0a02 100644 --- a/scripts/.gitignore +++ b/scripts/.gitignore @@ -1 +1 @@ -test_logs/ +testLogs/ diff --git a/scripts/fullTest.py b/scripts/fullTest.py new file mode 100755 index 00000000..4e899d9d --- /dev/null +++ b/scripts/fullTest.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python3 +import pathlib +import sys +import argparse +import time +import logging +import subprocess as sp + +sys.path.append("..") +import wlutil + +rootDir = pathlib.Path(__file__).parent.resolve() +logDir = rootDir / "testLogs" +testDir = (rootDir / '../test').resolve() +marshalBin = (rootDir / "../marshal").resolve() + +categories = ['baremetal', 'qemu', 'spike', 'smoke', 'special'] + +# Arguments to (marshal, marshal CMD) per category +categoryArgs = { + 'baremetal': ([], ["--spike"]), + 'qemu' : ([], []), + 'smoke' : ([], []), + 'spike' : (['--no-disk'], ['--spike']), + 'special' : ([], []) } + +# Lists of test names to run for each category. Each name +# should correspond to a test in FireMarshal/test. E.g., "command" means +# "FireMarshal/test/command.json". +categoryTests = { + # Run on spike. These tests depend only on an installed toolchain; you + # don't need to initialize Marshal's submodules to run this category. 'baremetal' : [ + 'bare', + 'dummy-bare', + 'spike', + 'spike-jobs', + 'spike-args', + 'rocc' + ], + + # These are the most complete 'general' tests and are the way most people + # will use Marshal. "qemu" : [ + 'bbl', + 'bbl-src', + 'bbl-args', + 'opensbi-src', + 'opensbi-args', + 'command', + 'driversJob', + 'drivers', + 'fed-run', + 'flist', + 'generateFiles', + 'guest-init', + 'hard', + 'host-init', + 'jobs', + 'kfrag', + 'linux-src', + 'makefile', + 'outputs', + 'overlay', + 'post-bin-jobs', + 'post-bin', + 'post_run_hook', + 'qemu-args', + 'qemu', + 'run', + 'simArgs', + ], + + # This tests both no-disk and spike. In theory, most (maybe all?) tests + # in "qemu" could run nodisk on spike, but it wouldn't really test + # anything new. We just include a few at-risk tests here to shave a few + # hours off the full test. Smoke runs also use spike.
+ "spike" : [ + 'command', + 'flist', + 'host-init', + 'jobs', + 'linux-src', + 'overlay', + 'post_run_hook', + 'simArgs', + 'bbl' + ], + + # A hopefully minimal and fast(ish) set of tests to make sure nothing + # obvious is broken + "smoke" : [ + 'fed-smoke0', + 'smoke0', + 'smoke1', + 'smoke2', + ], + + # These tests aren't run directly. Instead they include a testing + # script that is run. + "special" : [ + 'clean', + 'incremental', + 'inherit', + 'sameWorkdir', + 'fsSize', + 'makefile', + 'testWorkdir', + 'workload-dirs' + ] +} + + +def runTests(testNames, categoryName, marshalArgs=[], cmdArgs=[]): + """Run the tests named in testNames. Logging will use categoryName to + identify this set of tests. marshalArgs and cmdArgs are the arguments to + pass to 'marshal' and 'marshal test', respectively.""" + log = logging.getLogger() + + # Tuples of (testName, exception) for each failed test + failures=[] + + for tName in testNames: + log.log(logging.INFO, "[{}] {}:".format(categoryName, tName)) + tPath = testDir / (tName + ".json") + + try: + # These log at level DEBUG (go to log file but not stdout) + wlutil.run([marshalBin] + marshalArgs + ['clean', tPath], check=True) + wlutil.run([marshalBin] + marshalArgs + ['test'] + cmdArgs + [tPath], check=True) + except sp.CalledProcessError as e: + log.log(logging.INFO, "FAIL") + failures.append(("[{}]: {}".format(categoryName, tName), e)) + continue + + log.log(logging.INFO, "PASS") + + return failures + + +def runSpecial(testNames, categoryName): + """Run the tests named in testNamed assuming they are special tests. Each + name should be a directory under firemarshal/test/ and should have a + test.py script that will be run and indicates pass/fail via return code. + The tests will be called as such: ./test.py pathToMarshalBin""" + + log = logging.getLogger() + + # Tuples of (testName, exception) for each failed test + failures=[] + + for tName in testNames: + log.log(logging.INFO, "[{}] {}:".format(categoryName, tName)) + tPath = testDir / tName + + try: + wlutil.run(["python3", tPath / "test.py", marshalBin], check=True) + except sp.CalledProcessError as e: + log.log(logging.INFO, "FAIL") + failures.append(("[{}]: {}".format(categoryName, tName), e)) + continue + + log.log(logging.INFO, "PASS") + + return failures + +if __name__ == "__main__": + logDir.mkdir(exist_ok=True) + + timeline = time.strftime("%Y-%m-%d--%H-%M-%S", time.gmtime()) + logPath = logDir / (timeline + "-FullTest.log") + wlutil.initLogging(False, logPath=logPath) + log = logging.getLogger() + + log.log(logging.INFO, "Logging live to: " + str(logPath)) + + parser = argparse.ArgumentParser(description="Run end-to-end FireMarshal tests (mostly in FireMarshal/test)") + + parser.add_argument("-c", "--categories", nargs="+", default=list(categories), + help="Specify which categorie(s) of test to run. 
By default, all tests will be run") + + # TODO: add a 'from-failures' option to only run tests that failed a previous run + + args = parser.parse_args() + + allFailures = [] + for category in args.categories: + if category != 'special': + allFailures += runTests(categoryTests[category], category, + marshalArgs=categoryArgs[category][0], cmdArgs=categoryArgs[category][1]) + else: + allFailures += runSpecial(categoryTests["special"], "SPECIAL") + + log.info("Test Summary:") + if len(allFailures) > 0: + log.info("Some tests failed:") + for fail in allFailures: + log.info(fail[0]) + sys.exit(1) + else: + log.info("All PASS") + sys.exit(0) diff --git a/scripts/full_test.sh b/scripts/full_test.sh deleted file mode 100755 index 972b9db4..00000000 --- a/scripts/full_test.sh +++ /dev/null @@ -1,183 +0,0 @@ -#!/bin/bash -# Enable extended globbing -shopt -s extglob - -TEST_DIR=../test -MARSHAL_BIN=../marshal - -SUITE_PASS=true -mkdir -p test_logs -LOGNAME=$(realpath $(mktemp test_logs/results_full_test.XXXX)) - -echo "Running Full Test. Results available in $LOGNAME" - -# These tests need to run on spike, but not with the no-disk option -echo "Running bare-metal tests" | tee -a $LOGNAME -# IS_INCLUDE="@(bare|dummy-bare|spike|spike-jobs|spike-args|rocc)" -IS_INCLUDE="@(bare)" -$MARSHAL_BIN clean $TEST_DIR/$IS_INCLUDE.json | tee -a $LOGNAME -$MARSHAL_BIN test -s $TEST_DIR/$IS_INCLUDE.json | tee -a $LOGNAME -if [ ${PIPESTATUS[0]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false -else - echo "Success" | tee -a $LOGNAME -fi -echo "" - -echo "Initializing submodules for linux-based tests" | tee -a $LOGNAME -./init-submodules.sh | tee -a $LOGNAME -echo "" - -# We pre-build to avoid potential timeouts on a fresh clone -echo "Pre-building base workloads" | tee -a $LOGNAME -$MARSHAL_BIN build $TEST_DIR/br-base.json -$MARSHAL_BIN build $TEST_DIR/fedora-base.json -echo "" - -echo "Running launch timeout test (should timeout):" | tee -a $LOGNAME -echo "This test will reset your terminal" -$MARSHAL_BIN test $TEST_DIR/timeout-run.json | grep "timeout while running" -res=${PIPESTATUS[1]} -reset -echo "Ran launch timeout test (screen was reset)" -if [ $res != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false -else - echo "Success" | tee -a $LOGNAME -fi -echo "" - -echo "Running build timeout test (should timeout):" | tee -a $LOGNAME -$MARSHAL_BIN test $TEST_DIR/timeout-build.json | grep "timeout while building" -if [ ${PIPESTATUS[1]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false -else - echo "Success" | tee -a $LOGNAME -fi -echo "" - -# Run the bulk tests (all work with the 'test' command) -# Note the funny extended globbing, these are just lists of tests that -# shouldn't be tested (e.g. 
we exclude the base configs and some specialized -# tests) -echo "Running regular tests" | tee -a $LOGNAME -BULK_EXCLUDE="(br-base|fedora-base|incremental|clean|timeout-build|timeout-run|bare|dummy-bare|spike-jobs|spike|spike-args|rocc|fsSize)" -$MARSHAL_BIN clean $TEST_DIR/!$BULK_EXCLUDE.json | tee -a $LOGNAME -$MARSHAL_BIN test $TEST_DIR/!$BULK_EXCLUDE.json | tee -a $LOGNAME -if [ ${PIPESTATUS[0]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false -else - echo "Success" | tee -a $LOGNAME -fi -echo "" - -# Run the no-disk versions on spike, no-disk runs have many restrictions, -# we only run a few tests here to test basic capabilities -echo "Running no-disk capable tests on spike" | tee -a $LOGNAME -IS_INCLUDE="@(command|flist|host-init|jobs|linux-src|overlay|post-run-hook|run|smoke0|simArgs)" -$MARSHAL_BIN -d clean $TEST_DIR/$IS_INCLUDE.json | tee -a $LOGNAME -$MARSHAL_BIN -d test -s $TEST_DIR/$IS_INCLUDE.json | tee -a $LOGNAME -if [ ${PIPESTATUS[0]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false -else - echo "Success" | tee -a $LOGNAME -fi -echo "" - -# Run the specialized tests (tests that are too complicated for ./marshal -# test) -echo "Running clean test" | tee -a $LOGNAME -./$TEST_DIR/clean/test.py >> $LOGNAME -if [ ${PIPESTATUS[0]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false -fi -echo "" - -echo "Running incremental test" | tee -a $LOGNAME -./$TEST_DIR/incremental/test.py >> $LOGNAME -if [ ${PIPESTATUS[0]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false - exit 1 -fi -echo "" - -echo "Running inheritance test" | tee -a $LOGNAME -./$TEST_DIR/inherit/test.py >> $LOGNAME -if [ ${PIPESTATUS[0]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false - exit 1 -fi -echo "" - -# Ensures that marshal can be called from different PWDs -echo "Running different PWD test" | tee -a $LOGNAME -pushd $TEST_DIR/sameWorkdir -../../marshal test sameDir.json | tee -a $LOGNAME -if [ ${PIPESTATUS[0]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false - exit 1 -fi -popd -echo "" - -echo "Running fsSize test" | tee -a $LOGNAME -pushd $TEST_DIR/fsSize -./test.sh | tee -a $LOGNAME -if [ ${PIPESTATUS[0]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false - exit 1 -fi -popd -echo "" - -echo "Running recursive make test" | tee -a $LOGNAME -pushd $TEST_DIR/makefile -make -if [ ${PIPESTATUS[0]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false - exit 1 -fi -popd -echo "" - -echo "Running workdir test" | tee -a $LOGNAME -pushd $TEST_DIR/testWorkdir -./test.py | tee -a $LOGNAME -if [ ${PIPESTATUS[0]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false - exit 1 -fi -popd -echo "" - -echo "Running workload-paths test" | tee -a $LOGNAME -pushd $TEST_DIR/workload-dirs/ -./test.sh | tee -a $LOGNAME -if [ ${PIPESTATUS[0]} != 0 ]; then - echo "Failure" | tee -a $LOGNAME - SUITE_PASS=false - exit 1 -fi -popd -echo "" - -echo -e "\n\nMarshal full test complete. 
Log at: $LOGNAME" -if [ $SUITE_PASS = false ]; then - echo "FAILURE: Some tests failed" | tee -a $LOGNAME - exit 1 -else - echo "SUCCESS: Full test success" | tee -a $LOGNAME - exit 0 -fi diff --git a/test/bbl-args.json b/test/bbl-args.json new file mode 100644 index 00000000..27ac07f3 --- /dev/null +++ b/test/bbl-args.json @@ -0,0 +1,13 @@ +{ + "name" : "bbl-args", + "workdir" : "bbl", + "base" : "br-base.json", + "firmware" : { + "use-bbl" : true, + "bbl-build-args" : "--enable-logo" + }, + "command" : "echo marshal bbl-args test", + "testing" : { + "refDir" : "bblArgsOutput" + } +} diff --git a/test/bbl-src.json b/test/bbl-src.json new file mode 100644 index 00000000..f7a241bc --- /dev/null +++ b/test/bbl-src.json @@ -0,0 +1,16 @@ +{ + "name" : "bbl-src", + "workdir" : "bbl", + "base" : "br-base.json", + "host-init" : "copy-src.sh", + "firmware" : { + "bbl-src" : "riscv-pk", + "use-bbl" : true + }, + "command" : "echo bbl-src test", + "testing" : { + "refDir" : "bblSrcOutput" + } +} + + diff --git a/test/bbl.json b/test/bbl.json new file mode 100644 index 00000000..25f212f1 --- /dev/null +++ b/test/bbl.json @@ -0,0 +1,11 @@ +{ + "name" : "bbl", + "base" : "br-base.json", + "firmware" : { + "use-bbl" : true + }, + "command" : "echo Global: using bbl", + "testing" : { + "refDir" : "bblOutput" + } +} diff --git a/test/pk-src/.gitignore b/test/bbl/.gitignore similarity index 100% rename from test/pk-src/.gitignore rename to test/bbl/.gitignore diff --git a/test/bbl/README.md b/test/bbl/README.md new file mode 100644 index 00000000..269ab07f --- /dev/null +++ b/test/bbl/README.md @@ -0,0 +1,2 @@ +This test uses the legacy bootloader 'bbl'. For the moment, BBL will hang on +poweroff in Qemu. Only spike simulation is fully supported. diff --git a/test/pk-src/refOutput/pk-src/uartlog b/test/bbl/bblArgsOutput/bbl-args/uartlog similarity index 93% rename from test/pk-src/refOutput/pk-src/uartlog rename to test/bbl/bblArgsOutput/bbl-args/uartlog index 5cfb541f..97c01873 100644 --- a/test/pk-src/refOutput/pk-src/uartlog +++ b/test/bbl/bblArgsOutput/bbl-args/uartlog @@ -19,5 +19,3 @@ rrrrrrrrrrrrrrrr vv rrrrrrrrrrrrrrrr rrrrrrrrrrrrrrrrrr rrrrrrrrrrrrrrrrrr rrrrrrrrrrrrrrrrrrrr rrrrrrrrrrrrrrrrrrrr rrrrrrrrrrrrrrrrrrrrrr rrrrrrrrrrrrrrrrrrrrrr - - INSTRUCTION SETS WANT TO BE FREE diff --git a/test/bbl/bblOutput/bbl/uartlog b/test/bbl/bblOutput/bbl/uartlog new file mode 100644 index 00000000..811ed2da --- /dev/null +++ b/test/bbl/bblOutput/bbl/uartlog @@ -0,0 +1 @@ +Global: using bbl diff --git a/test/bbl/bblSrcOutput/bbl-src/uartlog b/test/bbl/bblSrcOutput/bbl-src/uartlog new file mode 100644 index 00000000..b8baed47 --- /dev/null +++ b/test/bbl/bblSrcOutput/bbl-src/uartlog @@ -0,0 +1 @@ +MARSHAL BBL-SRC TEST diff --git a/test/bbl/copy-src.sh b/test/bbl/copy-src.sh new file mode 100755 index 00000000..2cebb610 --- /dev/null +++ b/test/bbl/copy-src.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +if [ ! -d linux ]; then + rsync --exclude ".git" -r ../../boards/firechip/firmware/riscv-pk . 
+ patch riscv-pk/bbl/bbl.c < test.patch +fi diff --git a/test/bbl/test.patch b/test/bbl/test.patch new file mode 100644 index 00000000..4963206a --- /dev/null +++ b/test/bbl/test.patch @@ -0,0 +1,12 @@ +diff --git bbl/bbl.c bbl/bbl.c +index 3b92fc1..ae3437e 100644 +--- bbl/bbl.c ++++ bbl/bbl.c +@@ -129,6 +129,7 @@ void boot_loader(uintptr_t dtb) + #ifdef PK_PRINT_DEVICE_TREE + fdt_print(dtb_output()); + #endif ++ putstring("\nMARSHAL BBL-SRC TEST\n"); + mb(); + /* Use optional FDT preloaded external payload if present */ + entry_point = kernel_start ? kernel_start : PAYLOAD_START; diff --git a/test/clean/test.py b/test/clean/test.py index 474cd124..aaa0f919 100755 --- a/test/clean/test.py +++ b/test/clean/test.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +"""Call as ./test.py PATH/TO/MARSHAL""" import subprocess as sp import sys @@ -10,13 +11,16 @@ testSrc = pth.Path(__file__).parent testCfg = testSrc.parent / "clean.json" -# Should be the directory containing marshal -managerPath = pth.Path(os.getcwd()) / "marshal" -if not managerPath.exists: - managerPath = pth.Path(os.getcwd()) / "../../marshal" +if len(sys.argv) > 1: + managerPath = pth.Path(sys.argv[1]) +else: + # Should be the directory containing marshal + managerPath = pth.Path(os.getcwd()) / "marshal" if not managerPath.exists: - print("Can't find marshal, this script should be called either from firesim-software/ or firesim-software/test/incremental/", file=sys.stderr) - sys.exit(1) + managerPath = pth.Path(os.getcwd()) / "../../marshal" + if not managerPath.exists: + print("Can't find marshal, this script should be called either from firesim-software/ or firesim-software/test/incremental/", file=sys.stderr) + sys.exit(1) # Safety first kids: Always clean before you test print("Cleaning the test the first time:") diff --git a/test/fsSize/test.py b/test/fsSize/test.py new file mode 100644 index 00000000..85c18b36 --- /dev/null +++ b/test/fsSize/test.py @@ -0,0 +1,51 @@ +import sys +import os +import subprocess as sp +import pathlib as pth +import tempfile + +testSrc = pth.Path(__file__).parent.resolve() +testCfg = testSrc.parent / "fsSize.json" +marshalBin = pth.Path(sys.argv[1]) + + +# def runTest(tDir): +fail = False +try: + print("Generating input files") + try: + sp.run(["dd", "if=/dev/zero", "of=" + str(testSrc / "huge0.dat"), "bs=1M", "count=1024"], check=True) + sp.run(["dd", "if=/dev/zero", "of=" + str(testSrc / "huge1.dat"), "bs=1M", "count=1024"], check=True) + except sp.CalledProcessError as e: + print("Failed to generate input files: ", e) + raise + + print("Testing Workload") + try: + sp.run([marshalBin, "test", testCfg], check=True) + except sp.CalledProcessError as e: + print("Failed build and run workload: ", e) + raise + + try: + sp.run([marshalBin, "clean", testCfg], check=True) + except sp.CalledProcessError as e: + print("Failed cleanup workload. 
You should manually verify that the image files were deleted (they are very large)") + print(e) + raise + +except Exception as e: + print("Exception while running: ", e) + fail = True + +finally: + try: + os.remove(testSrc / "huge0.dat") + os.remove(testSrc / "huge1.dat") + except FileNotFoundError: + pass + +if fail: + sys.exit(1) +else: + sys.exit(0) diff --git a/test/fsSize/test.sh b/test/fsSize/test.sh deleted file mode 100755 index 91bd732a..00000000 --- a/test/fsSize/test.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Test the too-big part -echo "Testing error handling for too-big files" - -# make some huge files -echo "Generating input files" -dd if=/dev/zero of=huge0.dat bs=1M count=1024 -dd if=/dev/zero of=huge1.dat bs=1M count=1024 - -echo "Running marshal build" -../../marshal test ../fsSize.json -MARSHAL_EXIT=$? - -# This test uses a lot of disk space, clean up after ourselves -echo "Cleaning up" -rm huge*.dat -../../marshal clean ../fsSize.json - -if [ $MARSHAL_EXIT != 0 ]; then - echo "FsSize Test FAILURE" - return $MARSHAL_EXIT -else - echo "FsSize Test SUCCESS" - exit $MARSHAL_EXIT -fi diff --git a/test/generateFiles.json b/test/generateFiles.json new file mode 100644 index 00000000..bc0e9c10 --- /dev/null +++ b/test/generateFiles.json @@ -0,0 +1,13 @@ +{ + "name" : "generateFiles", + "base" : "br-base.json", + "host-init" : "generate.sh", + "overlay" : "overlay", + "command" : "cat /root/overlayInput && cat /root/fileInput && cat /root/generatedFileInput && cat /root/generatedOverlayInput", + "files" : [["fileInput", "/root/"], ["generatedFileInput", "/root/"]], + "post_run_hook" : "cleanup.sh", + "testing" : { + "refDir" : "refOutput", + "strip" : true + } +} diff --git a/test/generateFiles/.gitignore b/test/generateFiles/.gitignore new file mode 100644 index 00000000..057644fa --- /dev/null +++ b/test/generateFiles/.gitignore @@ -0,0 +1,2 @@ +generatedFileInput +overlay/root/generatedOverlayInput diff --git a/test/generateFiles/cleanup.sh b/test/generateFiles/cleanup.sh new file mode 100755 index 00000000..69cef48f --- /dev/null +++ b/test/generateFiles/cleanup.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +WORKDIR=$(dirname "${BASH_SOURCE[0]}") + +rm $WORKDIR/generatedFileInput +rm $WORKDIR/overlay/root/generatedOverlayInput diff --git a/test/generateFiles/fileInput b/test/generateFiles/fileInput new file mode 100644 index 00000000..7d528a78 --- /dev/null +++ b/test/generateFiles/fileInput @@ -0,0 +1 @@ +fileInput diff --git a/test/generateFiles/generate.sh b/test/generateFiles/generate.sh new file mode 100755 index 00000000..ac54742a --- /dev/null +++ b/test/generateFiles/generate.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -e + +echo "generatedFile" > generatedFileInput +echo "generatedOverlay" > overlay/root/generatedOverlayInput diff --git a/test/generateFiles/overlay/root/overlayInput b/test/generateFiles/overlay/root/overlayInput new file mode 100644 index 00000000..23b21002 --- /dev/null +++ b/test/generateFiles/overlay/root/overlayInput @@ -0,0 +1 @@ +overlayInput diff --git a/test/generateFiles/refOutput/generateFiles/uartlog b/test/generateFiles/refOutput/generateFiles/uartlog new file mode 100644 index 00000000..c000c059 --- /dev/null +++ b/test/generateFiles/refOutput/generateFiles/uartlog @@ -0,0 +1,4 @@ +overlayInput +fileInput +generatedFile +generatedOverlay diff --git a/test/hard/.gitignore b/test/hard/.gitignore index 59b32b2f..ed411347 100644 --- a/test/hard/.gitignore +++ b/test/hard/.gitignore @@ -1,2 +1,4 @@ *.o hello +hard.img +hard-bin diff --git 
a/test/incremental/test.py b/test/incremental/test.py index 10411542..82b5ff86 100755 --- a/test/incremental/test.py +++ b/test/incremental/test.py @@ -20,12 +20,12 @@ # Should be the directory containing marshal if len(sys.argv) != 2: print(usage) - exit(1) + sys.exit(1) managerPath = pth.Path(sys.argv[1]) if not managerPath.exists: print("Provided marshal command does not exist: ",managerPath) - exit(1) + sys.exit(1) # Reset the test, just in case it was left in a weird state sp.check_call(str(managerPath) + " clean " + str(testCfg), shell=True) diff --git a/test/inherit-child.json b/test/inherit-childCopyBin.json similarity index 64% rename from test/inherit-child.json rename to test/inherit-childCopyBin.json index 18a6a670..4e1f9673 100644 --- a/test/inherit-child.json +++ b/test/inherit-childCopyBin.json @@ -1,10 +1,10 @@ { - "name" : "inherit-child", + "name" : "inherit-childCopyBin", "workdir" : "inherit", "base" : "inherit-parent.json", "command" : "cat /root/runOutput", "testing" : { - "refDir" : "refOutput/child", + "refDir" : "refOutput/childCopyBin", "strip" : true } } diff --git a/test/inherit-childOwnBin.json b/test/inherit-childOwnBin.json new file mode 100644 index 00000000..db7d3466 --- /dev/null +++ b/test/inherit-childOwnBin.json @@ -0,0 +1,12 @@ +{ + "name" : "inherit-childOwnBin", + "workdir" : "inherit", + "base" : "inherit-parent.json", + "linux" : { + "config" : "linux.kfrag" + }, + "command" : "gunzip -kc /proc/config.gz | grep 'CONFIG_LOCALVERSION\\|CONFIG_DEFAULT_HOSTNAME'", + "testing" : { + "refDir" : "refOutput/childOwnBin" + } +} diff --git a/test/inherit/linux.kfrag b/test/inherit/linux.kfrag new file mode 100644 index 00000000..b74eaa43 --- /dev/null +++ b/test/inherit/linux.kfrag @@ -0,0 +1,2 @@ +CONFIG_LOCALVERSION="KFRAG_TEST" +CONFIG_DRM_GEM_CMA_HELPER=y diff --git a/test/inherit/refOutput/child/inherit-child/uartlog b/test/inherit/refOutput/childCopyBin/inherit-childCopyBin/uartlog similarity index 100% rename from test/inherit/refOutput/child/inherit-child/uartlog rename to test/inherit/refOutput/childCopyBin/inherit-childCopyBin/uartlog diff --git a/test/inherit/refOutput/childOwnBin/inherit-childOwnBin/uartlog b/test/inherit/refOutput/childOwnBin/inherit-childOwnBin/uartlog new file mode 100644 index 00000000..36ad8e62 --- /dev/null +++ b/test/inherit/refOutput/childOwnBin/inherit-childOwnBin/uartlog @@ -0,0 +1,3 @@ +CONFIG_LOCALVERSION="KFRAG_TEST" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_DEFAULT_HOSTNAME="ucbvax" diff --git a/test/inherit/test.py b/test/inherit/test.py index 21143f3d..fd2457bc 100755 --- a/test/inherit/test.py +++ b/test/inherit/test.py @@ -8,30 +8,58 @@ # Should be the directory containing the test testSrc = pth.Path(__file__).parent.resolve() -testCfg = testSrc.parent / "inherit-child.json" - -print("testSrc:",testSrc) -print("testCfg:",testCfg) -# Should be the directory containing marshal -managerPath = pth.Path(os.getcwd()) / "marshal" -if not managerPath.exists(): - managerPath = testSrc / "../../marshal" +copyBinCfg = testSrc.parent / "inherit-childCopyBin.json" +ownBinCfg = testSrc.parent / "inherit-childOwnBin.json" +parentCfg = testSrc.parent / "inherit-parent.json" + +if len(sys.argv) > 1: + managerPath = pth.Path(sys.argv[1]) +else: + # Should be the directory containing marshal + managerPath = pth.Path(os.getcwd()) / "marshal" if not managerPath.exists(): - print("Can't find marshal, this script should be called either from firemarshal root or firesim-software/test/inherit/", file=sys.stderr) - sys.exit(1) + managerPath = 
testSrc / "../../marshal" + if not managerPath.exists(): + print("Can't find marshal, this script should be called either from firemarshal root or firesim-software/test/inherit/", file=sys.stderr) + sys.exit(1) + +binDir = managerPath.parent / 'images' # Safety first kids: Always clean before you test print("Cleaning the test:") -if sp.call(str(managerPath) + " clean " + str(testCfg), shell=True) != 0: - print("Test Failure: clean command failed", file=sys.stderr) +try: + sp.run(str(managerPath) + " clean " + str(ownBinCfg), shell=True, check=True) + sp.run(str(managerPath) + " clean " + str(copyBinCfg), shell=True, check=True) + sp.run(str(managerPath) + " clean " + str(parentCfg), shell=True, check=True) +except Exception as e : + print("Test Failure: clean command failed: ", str(e), file=sys.stderr) sys.exit(1) print("Cleaning host-init") -(testSrc / 'runOutput').unlink() +try: + (testSrc / 'runOutput').unlink() +except FileNotFoundError: + pass + +print("Testing child (must rebuild bin) workload:") +if sp.call(str(managerPath) + " test " + str(ownBinCfg), shell=True) != 0: + print("Inherit Test Failure: failed to build inherit-childOwnBin", file=sys.stderr) + sys.exit(1) + +# Parent binary should not have been built since the child had to build its own +if (binDir / "inherit-parent-bin").exists(): + print("Parent was built, Marshal didn't mark the child to build its own binary") + sys.exit(1) + +print("Testing child (can inherit bin) workload:") +if sp.call(str(managerPath) + " test " + str(copyBinCfg), shell=True) != 0: + print("Inherit Test Failure: failed to build childbin", file=sys.stderr) + sys.exit(1) -print("Testing child workload:") -if sp.call(str(managerPath) + " test " + str(testCfg), shell=True) != 0: - print("Clean Test Failure", file=sys.stderr) +# Parent binary should have been built since the child can use it +if not (binDir / "inherit-parent-bin").exists(): + print("parent bin: ", str(binDir / "inherit-parent-bin")) + print("Parent not built, Marshal didn't mark the child to use its parent's binary") sys.exit(1) print("Inherit Test Success", file=sys.stderr) diff --git a/test/kmods.json b/test/kmods.json new file mode 100644 index 00000000..881bc004 --- /dev/null +++ b/test/kmods.json @@ -0,0 +1,15 @@ +{ + "name" : "kmods", + "base" : "br-base.json", + "host-init" : "copy-src.sh", + "command" : "modinfo icenet | grep FireMarshal && modinfo testmod | grep FireMarshal", + "linux" : { + "modules" : { + "icenet" : "test-icenet/", + "testmod" : "testMod" + } + }, + "testing" : { + "refDir" : "refOutput" + } +} diff --git a/test/kmods/.gitignore b/test/kmods/.gitignore new file mode 100644 index 00000000..23c6604d --- /dev/null +++ b/test/kmods/.gitignore @@ -0,0 +1 @@ +test-icenet diff --git a/test/kmods/README b/test/kmods/README new file mode 100644 index 00000000..6883a138 --- /dev/null +++ b/test/kmods/README @@ -0,0 +1,7 @@ +This tests custom kernel module handling in FireMarshal. There are two components to the test: + +1) New Module +testMod is a from-scratch "hello world" style kernel module. + +2) Overwrite existing module +test_icenet is a modified version of the default icenet driver diff --git a/test/kmods/copy-src.sh b/test/kmods/copy-src.sh new file mode 100755 index 00000000..1d11fdb9 --- /dev/null +++ b/test/kmods/copy-src.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +if [ ! 
-d test-icenet ]; then + rsync --exclude ".git" -r ../../boards/firechip/drivers/icenet-driver/ ./test-icenet + echo 'MODULE_DESCRIPTION("kmod icenet override test");' >> test-icenet/icenet.c +fi diff --git a/test/kmods/refOutput/kmods/uartlog b/test/kmods/refOutput/kmods/uartlog new file mode 100644 index 00000000..6d5114f0 --- /dev/null +++ b/test/kmods/refOutput/kmods/uartlog @@ -0,0 +1,2 @@ +description: FireMarshal test kmod icenet override +description: FireMarshal test of a novel kernel module (not overriding any existing module). diff --git a/test/kmods/testMod/.gitignore b/test/kmods/testMod/.gitignore new file mode 100644 index 00000000..bfd01104 --- /dev/null +++ b/test/kmods/testMod/.gitignore @@ -0,0 +1,38 @@ +*.cmd +*.a +*.asn1.[ch] +*.bin +*.bz2 +*.c.[012]*.* +*.dt.yaml +*.dtb +*.dtb.S +*.dwo +*.elf +*.gcno +*.gz +*.i +*.ko +*.lex.c +*.ll +*.lst +*.lz4 +*.lzma +*.lzo +*.mod.c +*.o +*.o.* +*.order +*.patch +*.s +*.so +*.so.dbg +*.su +*.symtypes +*.tab.[ch] +*.tar +*.xz +Module.symvers +modules.builtin +.tmp_versions +*.mod diff --git a/test/kmods/testMod/Makefile b/test/kmods/testMod/Makefile new file mode 100644 index 00000000..6c3e055e --- /dev/null +++ b/test/kmods/testMod/Makefile @@ -0,0 +1,15 @@ +ifneq ($(KERNELRELEASE),) + +obj-m += testmod.o + +else + +KMAKE=make -C $(LINUXSRC) ARCH=riscv CROSS_COMPILE=riscv64-unknown-linux-gnu- M=$(PWD) + +testmod.ko: testmod.c + $(KMAKE) + +clean: + $(KMAKE) clean + +endif diff --git a/test/kmods/testMod/testmod.c b/test/kmods/testMod/testmod.c new file mode 100644 index 00000000..c5cd4267 --- /dev/null +++ b/test/kmods/testMod/testmod.c @@ -0,0 +1,23 @@ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Nathan Pemberton"); + +MODULE_DESCRIPTION("FireMarshal test of a novel kernel module (not overriding any existing module)."); +MODULE_VERSION("1.00"); + +static int __init testmod_init(void) +{ + printk(KERN_INFO "testmod loaded \n"); + return 0; +} + +static void __exit testmod_exit(void) +{ + return; +} + +module_init(testmod_init); +module_exit(testmod_exit); diff --git a/test/linux-src.json b/test/linux-src.json index bf98168c..18bd5e1a 100644 --- a/test/linux-src.json +++ b/test/linux-src.json @@ -2,7 +2,7 @@ "name" : "linux-src", "base" : "br-base.json", "host-init" : "copy-src.sh", - "linux-src" : "riscv-linux", + "linux-src" : "linux", "command" : "echo Global : command", "post_run_hook" : "./check-output.sh linux-src/uartlog", "testing" : { diff --git a/test/linux-src/.gitignore b/test/linux-src/.gitignore index 77f985cd..fcafcccb 100644 --- a/test/linux-src/.gitignore +++ b/test/linux-src/.gitignore @@ -1 +1 @@ -riscv-linux/ +linux/ diff --git a/test/linux-src/copy-src.sh b/test/linux-src/copy-src.sh index a213a79b..85215df7 100755 --- a/test/linux-src/copy-src.sh +++ b/test/linux-src/copy-src.sh @@ -1,5 +1,7 @@ #!/bin/bash -if [ ! -d riscv-linux ]; then - rsync --exclude ".git" -r ../../riscv-linux . - patch riscv-linux/kernel/reboot.c < test.patch +set -e + +if [ ! -d linux ]; then + rsync --exclude ".git" -r ../../boards/default/linux .
+ patch linux/kernel/reboot.c < test.patch fi diff --git a/test/makefile/Makefile b/test/makefile/Makefile index d7273412..ba1e6086 100644 --- a/test/makefile/Makefile +++ b/test/makefile/Makefile @@ -1,2 +1,2 @@ test: - cd ../ && ../marshal test makefile.json + cd ../ && $(MARSHALBIN) test makefile.json diff --git a/test/makefile/test.py b/test/makefile/test.py new file mode 100644 index 00000000..ce87a0f2 --- /dev/null +++ b/test/makefile/test.py @@ -0,0 +1,10 @@ +import pathlib as pth +import subprocess as sp +import os +import sys + +testSrc = pth.Path(__file__).parent.resolve() +managerPath = pth.Path(sys.argv[1]) + +os.environ['MARSHALBIN'] = str(managerPath) +sp.run("make", shell=True, cwd=testSrc) diff --git a/test/opensbi-args.json b/test/opensbi-args.json new file mode 100644 index 00000000..76508e6a --- /dev/null +++ b/test/opensbi-args.json @@ -0,0 +1,16 @@ +{ + "name" : "opensbi-args", + "workdir" : "opensbi", + "base" : "br-base.json", + "host-init" : "copy-src.sh", + "firmware" : { + "opensbi-src" : "opensbi", + "opensbi-build-args" : "MARSHAL_TEST=1" + }, + "command" : "echo open-sbi test", + "testing" : { + "refDir" : "argsOutput" + } +} + + diff --git a/test/opensbi-src.json b/test/opensbi-src.json new file mode 100644 index 00000000..ebb51c11 --- /dev/null +++ b/test/opensbi-src.json @@ -0,0 +1,15 @@ +{ + "name" : "opensbi-src", + "workdir" : "opensbi", + "base" : "br-base.json", + "host-init" : "copy-src.sh", + "firmware" : { + "opensbi-src" : "opensbi" + }, + "command" : "echo open-sbi test", + "testing" : { + "refDir" : "srcOutput" + } +} + + diff --git a/test/opensbi/.gitignore b/test/opensbi/.gitignore new file mode 100644 index 00000000..2bf6a7ba --- /dev/null +++ b/test/opensbi/.gitignore @@ -0,0 +1 @@ +opensbi diff --git a/test/opensbi/argsOutput/opensbi-args/uartlog b/test/opensbi/argsOutput/opensbi-args/uartlog new file mode 100644 index 00000000..c79e0634 --- /dev/null +++ b/test/opensbi/argsOutput/opensbi-args/uartlog @@ -0,0 +1 @@ +OpenSBI v42.17 diff --git a/test/opensbi/copy-src.sh b/test/opensbi/copy-src.sh new file mode 100755 index 00000000..09751829 --- /dev/null +++ b/test/opensbi/copy-src.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +if [ ! -d linux ]; then + rsync --exclude ".git" -r ../../boards/firechip/firmware/opensbi . 
+ patch -p0 < test.patch +fi diff --git a/test/opensbi/srcOutput/opensbi-src/uartlog b/test/opensbi/srcOutput/opensbi-src/uartlog new file mode 100644 index 00000000..ac3b4637 --- /dev/null +++ b/test/opensbi/srcOutput/opensbi-src/uartlog @@ -0,0 +1 @@ +OpenSBI v42.8 diff --git a/test/opensbi/test.patch b/test/opensbi/test.patch new file mode 100644 index 00000000..2aa265b6 --- /dev/null +++ b/test/opensbi/test.patch @@ -0,0 +1,29 @@ +diff -ruN opensbi/firmware/objects.mk newopensbi/firmware/objects.mk +--- opensbi/firmware/objects.mk 2020-10-01 15:36:26.057869489 -0400 ++++ newopensbi/firmware/objects.mk 2020-10-01 15:35:12.271684120 -0400 +@@ -51,3 +51,7 @@ + ifdef FW_OPTIONS + firmware-genflags-y += -DFW_OPTIONS=$(FW_OPTIONS) + endif ++ ++ifdef MARSHAL_TEST ++firmware-genflags-y += -DMARSHAL_TEST ++endif +diff -ruN opensbi/include/sbi/sbi_version.h newopensbi/include/sbi/sbi_version.h +--- opensbi/include/sbi/sbi_version.h 2020-10-01 15:36:19.193759202 -0400 ++++ newopensbi/include/sbi/sbi_version.h 2020-10-01 15:35:12.391686048 -0400 +@@ -10,8 +10,13 @@ + #ifndef __SBI_VERSION_H__ + #define __SBI_VERSION_H__ + +-#define OPENSBI_VERSION_MAJOR 0 ++#define OPENSBI_VERSION_MAJOR 42 ++ ++#ifdef MARSHAL_TEST ++#define OPENSBI_VERSION_MINOR 17 ++#else + #define OPENSBI_VERSION_MINOR 8 ++#endif + + /** + * OpenSBI 32-bit version with: diff --git a/test/pk-src.json b/test/pk-src.json deleted file mode 100644 index 560a32a0..00000000 --- a/test/pk-src.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "name" : "pk-src", - "base" : "br-base.json", - "host-init" : "copy-src.sh", - "pk-src" : "riscv-pk", - "command" : "echo Global : command", - "testing" : { - "refDir" : "refOutput" - } -} diff --git a/test/pk-src/copy-src.sh b/test/pk-src/copy-src.sh deleted file mode 100755 index c89fd8ca..00000000 --- a/test/pk-src/copy-src.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -e - -if [ ! -d riscv-pk ]; then - rsync --exclude ".git" -r ../../riscv-pk . 
- patch riscv-pk/bbl/bbl.c < test.patch -fi diff --git a/test/pk-src/test.patch b/test/pk-src/test.patch deleted file mode 100644 index c9b7cf7f..00000000 --- a/test/pk-src/test.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/bbl/bbl.c b/bbl/bbl.c -index 1b96a9d..8d82e7d 100644 ---- a/bbl/bbl.c -+++ b/bbl/bbl.c -@@ -7,6 +7,8 @@ - #include "fdt.h" - #include - -+#define PK_ENABLE_LOGO -+ - static const void* entry_point; - long disabled_hart_mask; - diff --git a/test/sameWorkdir/test.py b/test/sameWorkdir/test.py new file mode 100644 index 00000000..17aeefd2 --- /dev/null +++ b/test/sameWorkdir/test.py @@ -0,0 +1,22 @@ +import subprocess as sp +import sys +import os +import pathlib as pth +import re + +# Should be the directory containing the test +testSrc = pth.Path(__file__).parent.resolve() +# testCfg = testSrc / "sameDir.json" +os.chdir(testSrc) + +managerPath = pth.Path(sys.argv[1]) + +if sp.call(str(managerPath) + " test sameDir.json", shell=True) != 0: + print("Clean Test Failure", file=sys.stderr) + sys.exit(1) +else: + print("Success") + sys.exit(0) + + + diff --git a/test/testWorkdir/test.py b/test/testWorkdir/test.py index 8864e3ca..e3a3d15a 100755 --- a/test/testWorkdir/test.py +++ b/test/testWorkdir/test.py @@ -8,31 +8,29 @@ # Should be the directory containing the test testSrc = pth.Path(__file__).parent -testCfg = testSrc.parent / "testWorkdir.json" +testCfg = testSrc / "testWorkdir.json" -# Should be the directory containing marshal -managerPath = pth.Path(os.getcwd()) / "marshal" -if not managerPath.exists(): - managerPath = pth.Path(os.getcwd()) / "../../marshal" - if not managerPath.exists: - print("Can't find marshal, this script should be called either from FireMarshal/ or FireMarshal/test/testWorkload/", file=sys.stderr) - sys.exit(1) +if len(sys.argv) > 1: + managerPath = pth.Path(sys.argv[1]) +else: + # Should be the directory containing marshal + managerPath = pth.Path(os.getcwd()) / "marshal" + if not managerPath.exists(): + managerPath = pth.Path(os.getcwd()) / "../../marshal" + if not managerPath.exists: + print("Can't find marshal, this script should be called either from FireMarshal/ or FireMarshal/test/testWorkload/", file=sys.stderr) + sys.exit(1) -print(str(managerPath)) # Safety first kids: Always clean before you test print("cleaning testWorkload test") -cleanCmd = [str(managerPath), "--workdir", "../", "clean", str(testCfg)] +cleanCmd = [str(managerPath), "--workdir", "../", "clean", "testWorkdir.json"] print(" ".join(cleanCmd)) -if sp.call(cleanCmd) != 0: - print("Clean Test Failure: the first clean command failed", file=sys.stderr) - sys.exit(1) +sp.run(cleanCmd, cwd=testSrc, check=True) print("Building workload with non-local workload bases") -testCmd = [str(managerPath), "--workdir", "../", "test", str(testCfg)] +testCmd = [str(managerPath), "--workdir", "../", "test", "testWorkdir.json"] print(" ".join(testCmd)) -if sp.call(testCmd) != 0: - print("Clean Test Failure: first run of test failed", file=sys.stderr) - sys.exit(1) +sp.run(testCmd, cwd=testSrc, check=True) print("testWorkdir test Success", file=sys.stderr) sys.exit() diff --git a/test/workload-dirs/.gitignore b/test/workload-dirs/.gitignore new file mode 100644 index 00000000..7f489216 --- /dev/null +++ b/test/workload-dirs/.gitignore @@ -0,0 +1 @@ +!marshal-config.yaml diff --git a/test/workload-dirs/marshal-config.yaml b/test/workload-dirs/marshal-config.yaml new file mode 100644 index 00000000..3107456c --- /dev/null +++ b/test/workload-dirs/marshal-config.yaml @@ -0,0 +1 @@ 
+workload-dirs: [../] diff --git a/test/workload-dirs/test.py b/test/workload-dirs/test.py new file mode 100644 index 00000000..d9e76618 --- /dev/null +++ b/test/workload-dirs/test.py @@ -0,0 +1,9 @@ +import subprocess as sp +import sys +import os +import pathlib as pth + +testSrc = pth.Path(__file__).parent.resolve() +managerPath = pth.Path(sys.argv[1]) + +sp.run([managerPath, "test", "command.json"], check=True, cwd=testSrc) diff --git a/test/workload-dirs/test.sh b/test/workload-dirs/test.sh deleted file mode 100755 index 0b6cc0eb..00000000 --- a/test/workload-dirs/test.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -set -e - -TESTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -pushd $TESTDIR - -marshal test command.json - -popd diff --git a/wlutil/br/br.py b/wlutil/br/br.py index 93dcc808..d7bd7950 100644 --- a/wlutil/br/br.py +++ b/wlutil/br/br.py @@ -104,6 +104,7 @@ def fileDeps(self): # List all files that should be checked to determine if BR is uptodate deps = [] deps.append(br_dir / 'buildroot-config') + deps.append(br_dir / 'busybox-config') deps.append(pathlib.Path(__file__)) return deps diff --git a/wlutil/br/busybox-config b/wlutil/br/busybox-config index 650d313f..18091ff6 100644 --- a/wlutil/br/busybox-config +++ b/wlutil/br/busybox-config @@ -534,7 +534,7 @@ CONFIG_LSATTR=y CONFIG_INSMOD=y CONFIG_LSMOD=y CONFIG_FEATURE_LSMOD_PRETTY_2_6_OUTPUT=y -# CONFIG_MODINFO is not set +CONFIG_MODINFO=y CONFIG_MODPROBE=y # CONFIG_FEATURE_MODPROBE_BLACKLIST is not set # CONFIG_FEATURE_MODPROBE_SMALL_OPTIONS_ON_CMDLINE is not set diff --git a/wlutil/build.py b/wlutil/build.py index dcaf5a7e..2cd55eaf 100644 --- a/wlutil/build.py +++ b/wlutil/build.py @@ -53,11 +53,93 @@ def handlePostBin(config, linuxBin): # add linux src and bin path to the environment postbinEnv = os.environ.copy() - postbinEnv.update({'FIREMARSHAL_LINUX_SRC' : config.get('linux-src').as_posix()}) - postbinEnv.update({'FIREMARSHAL_LINUX_BIN' : linuxBin}) + if 'linux' in config: + postbinEnv.update({'FIREMARSHAL_LINUX_SRC' : config['linux']['source'].as_posix()}) + postbinEnv.update({'FIREMARSHAL_LINUX_BIN' : linuxBin}) run([config['post-bin'].path] + config['post-bin'].args, env=postbinEnv, cwd=config['workdir']) + +def submoduleDepsTask(submodules, name=""): + """Returns a calc_dep task for doit to check if submodule is up to date. + Packaging this in a calc_dep task avoids unnecessary checking that can be + slow.""" + def submoduleDeps(submodules): + return { 'uptodate' : [ config_changed(checkGitStatus(sub)) for sub in submodules ] } + + return { + 'name' : name, + 'actions' : [ (submoduleDeps, [ submodules ]) ] + } + + +def kmodDepsTask(cfg, taskDeps=None, name=""): + """Check if the kernel modules in cfg are uptodate (suitable for doit's calc_dep function)""" + + def checkMods(cfg): + log = logging.getLogger() + for driverDir in cfg['linux']['modules'].values(): + if not driverDir.exists(): + log.warn("WARNING: Required module " + str(driverDir) + " does not exist: Assuming the workload is not uptodate.") + return False + try: + p = run(["make", "-q", "LINUXSRC=" + str(cfg['linux']['source'])], cwd=driverDir, check=False) + + if p.returncode != 0: + return False + except Exception as e: + log.warn("WARNING: Error when checking if module " + str(driverDir) + " is up to date: Assuming workload is not up to date. 
Error: " + str(e)) + return False + + return True + + def calcModsAction(cfg): + return { 'uptodate' : [ checkMods(cfg) ] } + + task = { + 'name' : name, + 'actions' : [ (calcModsAction, [ cfg ]) ] + } + + if taskDeps is not None: + task['task_dep'] = taskDeps + + return task + + +def fileDepsTask(name, taskDeps=None, overlay=None, files=None): + """Returns a task dict for a calc_dep task that calculates the file + dependencies representd by an overlay and/or a list of FileSpec objects. + Either can be None. + + taskDeps should be a list of names of tasks that must run before + calculating dependencies (e.g. host-init)""" + + def fileDeps(overlay, files): + """The python-action for the filedeps task, returns a dictionary of dependencies""" + deps = [] + if overlay is not None: + deps.append(overlay) + + if files is not None: + deps += [ f.src for f in files if not f.src.is_symlink() ] + + for dep in deps.copy(): + if dep.is_dir(): + deps += [ child for child in dep.glob('**/*') if not child.is_symlink()] + + return { 'file_dep' : [ str(f) for f in deps if not f.is_dir() ] } + + task = { + 'name' : 'calc_' + name + '_dep', + 'actions' : [ (fileDeps, [overlay, files]) ], + } + if taskDeps is not None: + task['task_dep'] = taskDeps + + return task + + def addDep(loader, config): """Adds 'config' to the doit dependency graph ('loader')""" @@ -75,11 +157,14 @@ def addDep(loader, config): bin_file_deps = [] bin_task_deps = [] + hostInit + config['base-deps'] bin_targets = [] - if 'linux-config' in config: - bin_file_deps += config['linux-config'] + if 'linux' in config: + bin_file_deps += config['linux']['config'] bin_task_deps.append('BuildBusybox') bin_targets.append(config['dwarf']) + if config['use-parent-bin']: + bin_task_deps.append(str(config['base-bin'])) + diskBin = [] if 'bin' in config: if 'dwarf' in config: @@ -87,14 +172,29 @@ def addDep(loader, config): else: targets = [str(config['bin'])] + moddeps = [config.get('pk-src')] + if 'firmware' in config: + moddeps.append(config['firmware']['source']) + + bin_calc_dep_tsks = [ + submoduleDepsTask(moddeps, name="_submodule_deps_"+config['name']), + ] + + + if 'linux' in config: + moddeps.append(config['linux']['source']) + bin_calc_dep_tsks.append(kmodDepsTask(config, name="_kmod_deps_"+config['name'])) + + for tsk in bin_calc_dep_tsks: + loader.addTask(tsk) + loader.addTask({ 'name' : str(config['bin']), 'actions' : [(makeBin, [config])], 'targets' : targets, 'file_dep': bin_file_deps, 'task_dep' : bin_task_deps, - 'uptodate' : [config_changed(checkGitStatus(config.get('linux-src'))), - config_changed(checkGitStatus(config.get('pk-src')))] + 'calc_dep' : [ tsk['name'] for tsk in bin_calc_dep_tsks ] }) diskBin = [str(config['bin'])] @@ -112,14 +212,19 @@ def addDep(loader, config): else: targets = [str(noDiskPath(config['bin']))] + uptodate = [] + if 'firmware' in config: + uptodate.append(config_changed(checkGitStatus(config['firmware']['source']))) + if 'linux' in config: + uptodate.append(config_changed(checkGitStatus(config['linux']['source']))) + loader.addTask({ 'name' : str(noDiskPath(config['bin'])), 'actions' : [(makeBin, [config], {'nodisk' : True})], 'targets' : targets, 'file_dep': nodisk_file_deps, 'task_dep' : nodisk_task_deps, - 'uptodate' : [config_changed(checkGitStatus(config.get('linux-src'))), - config_changed(checkGitStatus(config.get('pk-src')))] + 'uptodate' : uptodate }) nodiskBin = [str(noDiskPath(config['bin']))] @@ -138,21 +243,15 @@ def addDep(loader, config): # Add a rule for the image (if any) img_file_deps 
= [] img_task_deps = [] + hostInit + postBin + config['base-deps'] + img_calc_deps = [] if 'img' in config: - if 'files' in config: - for fSpec in config['files']: - # Add directories recursively - if fSpec.src.is_dir(): - for root, dirs, files in os.walk(fSpec.src): - for f in files: - fdep = os.path.join(root, f) - # Ignore symlinks - if not os.path.islink(fdep): - img_file_deps.append(fdep) - else: - # Ignore symlinks - if not os.path.islink(fSpec.src): - img_file_deps.append(fSpec.src) + if 'files' in config or 'overlay' in config: + # We delay calculation of files and overlay dependencies to runtime + # in order to catch any generated inputs + fdepsTask = fileDepsTask(config['name'], taskDeps=img_task_deps, + overlay=config.get('overlay'), files=config.get('files')) + img_calc_deps.append(fdepsTask['name']) + loader.addTask(fdepsTask) if 'guest-init' in config: img_file_deps.append(config['guest-init'].path) img_task_deps.append(str(config['bin'])) @@ -166,7 +265,8 @@ def addDep(loader, config): 'actions' : [(makeImage, [config])], 'targets' : [config['img']], 'file_dep' : img_file_deps, - 'task_dep' : img_task_deps + 'task_dep' : img_task_deps, + 'calc_dep' : img_calc_deps }) # Generate a task-graph loader for the doit "Run" command @@ -239,8 +339,10 @@ def buildWorkload(cfgName, cfgs, buildBin=True, buildImg=True): if 'img' in jCfg and buildImg: imgList.append(jCfg['img']) + opts = {**getOpt('doitOpts'), **{'check_file_uptodate': WithMetadataChecker}} + doitHandle = doit.doit_cmd.DoitMain(taskLoader, extra_config={'run': opts}) + # The order isn't critical here, we should have defined the dependencies correctly in loader - doitHandle = doit.doit_cmd.DoitMain(taskLoader, extra_config={'run': getOpt('doitOpts')}) return doitHandle.run([str(p) for p in binList + imgList]) def makeInitramfs(srcs, cpioDir, includeDevNodes=False): @@ -287,6 +389,7 @@ def generateKConfig(kfrags, linuxSrc): run([linuxSrc / 'scripts/kconfig/merge_config.sh', str(defCfg)] + list(map(str, kfrags)), env=kconfigEnv, cwd=linuxSrc) + def makeInitramfsKfrag(src, dst): with open(dst, 'w') as f: f.write("CONFIG_BLK_DEV_INITRD=y\n") @@ -294,26 +397,22 @@ def makeInitramfsKfrag(src, dst): f.write('CONFIG_INITRAMFS_COMPRESSION_LZO=y\n') f.write('CONFIG_INITRAMFS_SOURCE="' + str(src) + '"\n') -def makeDrivers(kfrags, boardDir, linuxSrc): - """Build all the drivers for this linux source on the specified board. - Returns a path to a cpio archive containing all the drivers in - /lib/modules/KERNELVERSION/*.ko - kfrags: list of paths to kernel configuration fragments to use when building drivers - boardDir: Path to the board directory. Should have a 'drivers/' subdir - containing all the drivers we should build for this board - linuxSrc: Path to linux source tree to build against - """ +def makeModules(cfg): + """Build all the kernel modules for this config. 
The compiled kmods will be + put in the appropriate location in the initramfs staging area.""" + + linCfg = cfg['linux'] - makeCmd = "make LINUXSRC=" + str(linuxSrc) + makeCmd = "make LINUXSRC=" + str(linCfg['source']) - # Prepare the linux source for building external drivers - generateKConfig(kfrags, linuxSrc) - run(["make"] + getOpt('linux-make-args') + ["modules_prepare", getOpt('jlevel')], cwd=linuxSrc) - kernelVersion = sp.run(["make", "-s", "ARCH=riscv", "kernelrelease"], cwd=linuxSrc, stdout=sp.PIPE, universal_newlines=True).stdout.strip() + # Prepare the linux source for building external modules + generateKConfig(linCfg['config'], linCfg['source']) + run(["make"] + getOpt('linux-make-args') + ["modules_prepare", getOpt('jlevel')], cwd=linCfg['source']) + kernelVersion = sp.run(["make", "-s", "ARCH=riscv", "kernelrelease"], cwd=linCfg['source'], stdout=sp.PIPE, universal_newlines=True).stdout.strip() drivers = [] - for driverDir in getOpt('driver-dirs'): + for driverDir in linCfg['modules'].values(): checkSubmodule(driverDir) # Drivers don't seem to detect changes in the kernel @@ -337,6 +436,47 @@ def makeDrivers(kfrags, boardDir, linuxSrc): # Setup the dependency file needed by modprobe to load the drivers run(['depmod', '-b', str(getOpt('initramfs-dir') / "drivers"), kernelVersion]) + +def makeBBL(config, nodisk=False): + # BBL doesn't seem to detect changes in its configuration and won't rebuild if the payload path changes + bblBuild = config['firmware']['source'] / 'build' + if bblBuild.exists(): + shutil.rmtree(bblBuild) + bblBuild.mkdir() + + configureArgs = ['--host=riscv64-unknown-elf', + '--with-payload=' + str(config['linux']['source'] / 'vmlinux')] + + if 'bbl-build-args' in config['firmware']: + configureArgs += config['firmware']['bbl-build-args'] + + run(['../configure'] + configureArgs, cwd=bblBuild) + run(['make', getOpt('jlevel')], cwd=bblBuild) + + return bblBuild / 'bbl' + + +def makeOpenSBI(config, nodisk=False): + payload = config['linux']['source'] / 'arch' / 'riscv' / 'boot' / 'Image' + # Align to next MiB + payloadSize = ((payload.stat().st_size + 0xfffff) // 0x100000) * 0x100000 + + args = getOpt('linux-make-args') + ['PLATFORM=generic', + 'FW_PAYLOAD_PATH=' + str(payload), + 'FW_PAYLOAD_FDT_ADDR=0x$(shell printf "%X" ' + '$$(( $(FW_TEXT_START) + $(FW_PAYLOAD_OFFSET) + ' + hex(payloadSize) + ' )))'] + + if 'opensbi-build-args' in config['firmware']: + args += config['firmware']['opensbi-build-args'] + + run(['make'] + + getOpt('linux-make-args') + args, + cwd=config['firmware']['source'] + ) + + return config['firmware']['source'] / 'build' / 'platform' / 'generic' / 'firmware' / 'fw_payload.elf' + + def makeBin(config, nodisk=False): """Build the binary specified in 'config'. @@ -345,15 +485,22 @@ def makeBin(config, nodisk=False): log = logging.getLogger() + if config['use-parent-bin'] and not nodisk: + shutil.copy(config['base-bin'], config['bin']) + if 'dwarf' in config: + shutil.copy(config['base-dwarf'], config['dwarf']) + return True + # We assume that if you're not building linux, then the image is pre-built (e.g. 
during host-init) - if 'linux-config' in config: + if 'linux' in config: initramfsIncludes = [] # Some submodules are only needed if building Linux try: - checkSubmodule(config['linux-src']) - checkSubmodule(config['pk-src']) - makeDrivers(config['linux-config'], getOpt('board-dir'), config['linux-src']) + checkSubmodule(config['linux']['source']) + checkSubmodule(config['firmware']['source']) + + makeModules(config) except SubmoduleError as err: return doit.exceptions.TaskFailed(err) @@ -372,25 +519,20 @@ def makeBin(config, nodisk=False): initramfsPath = makeInitramfs(initramfsIncludes, cpioDir, includeDevNodes=True) makeInitramfsKfrag(initramfsPath, cpioDir / "initramfs.kfrag") - generateKConfig(config['linux-config'] + [cpioDir / "initramfs.kfrag"], config['linux-src']) - run(['make'] + getOpt('linux-make-args') + ['vmlinux', getOpt('jlevel')], cwd=config['linux-src']) + generateKConfig(config['linux']['config'] + [cpioDir / "initramfs.kfrag"], config['linux']['source']) + run(['make'] + getOpt('linux-make-args') + ['vmlinux', 'Image', getOpt('jlevel')], cwd=config['linux']['source']) - # BBL doesn't seem to detect changes in its configuration and won't rebuild if the payload path changes - pk_build = (config['pk-src'] / 'build') - if pk_build.exists(): - shutil.rmtree(pk_build) - pk_build.mkdir() - - run(['../configure', '--host=riscv64-unknown-elf', - '--with-payload=' + str(config['linux-src'] / 'vmlinux')], cwd=pk_build) - run(['make', getOpt('jlevel')], cwd=pk_build) + if 'use-bbl' in config.get('firmware', {}) and config['firmware']['use-bbl']: + fw = makeBBL(config, nodisk) + else: + fw = makeOpenSBI(config, nodisk) if nodisk: - shutil.copy(pk_build / 'bbl', noDiskPath(config['bin'])) - shutil.copy(config['linux-src'] / 'vmlinux', noDiskPath(config['dwarf'])) + shutil.copy(fw, noDiskPath(config['bin'])) + shutil.copy(config['linux']['source'] / 'vmlinux', noDiskPath(config['dwarf'])) else: - shutil.copy(pk_build / 'bbl', config['bin']) - shutil.copy(config['linux-src'] / 'vmlinux', config['dwarf']) + shutil.copy(fw, config['bin']) + shutil.copy(config['linux']['source'] / 'vmlinux', config['dwarf']) return True @@ -406,6 +548,10 @@ def makeImage(config): if config['img-sz'] != 0: resizeFS(config['img'], config['img-sz']) + if 'overlay' in config: + log.info("Applying Overlay: " + str(config['overlay'])) + applyOverlay(config['img'], config['overlay']) + if 'files' in config: log.info("Applying file list: " + str(config['files'])) copyImgFiles(config['img'], config['files'], 'in') diff --git a/wlutil/busybox-config b/wlutil/busybox-config index e411d363..7a2c2209 100644 --- a/wlutil/busybox-config +++ b/wlutil/busybox-config @@ -559,7 +559,7 @@ CONFIG_LSATTR=y CONFIG_INSMOD=y CONFIG_LSMOD=y CONFIG_FEATURE_LSMOD_PRETTY_2_6_OUTPUT=y -# CONFIG_MODINFO is not set +CONFIG_MODINFO=y CONFIG_MODPROBE=y # CONFIG_FEATURE_MODPROBE_BLACKLIST is not set CONFIG_RMMOD=y diff --git a/wlutil/config.py b/wlutil/config.py index 28ba7752..ea0630c9 100644 --- a/wlutil/config.py +++ b/wlutil/config.py @@ -9,6 +9,7 @@ import humanfriendly as hf from .wlutil import * import pathlib as pth +import copy # This is a comprehensive list of all user-defined config options # Note that paths direct from a config file are relative to workdir, but will @@ -27,13 +28,10 @@ 'qemu', # Optional extra arguments to qemu 'qemu-args', - # Path to riscv-linux source to use (defaults to the included linux) - 'linux-src', - # Path to linux configuration fragments to use (can be a list or scalar - # string from the user but 
will be converted to a list of pathlib.Path) - 'linux-config', - # Path to riscv-pk (used for bbl) - 'pk-src', + # Grouped linux options see the docs for subfields + 'linux', + # Grouped firmware-related options + 'firmware', # Path to script to run on host before building this config 'host-init', # Path to script to run on host after building the binary @@ -68,6 +66,14 @@ 'mem' ] +# Deprecated options, will be translated to current equivalents early on in +# loading. They can be ignored after that. +configDeprecated = [ + 'linux-config', + 'linux-src', + 'pk-src' + ] + # This is a comprehensive list of all options set during config parsing # (but not explicitly provided by the user) configDerived = [ @@ -82,12 +88,15 @@ 'distro', # Base linux distribution (either 'fedora' or 'br') 'initramfs', # boolean: should we use an initramfs with this config? 'jobs', # After parsing, jobs is a collections.OrderedDict containing 'Config' objects for each job. - 'base-deps' # A list of tasks that this workload needs from its base (a potentially empty list) + 'base-deps', # A list of tasks that this workload needs from its base (a potentially empty list) + 'firmware-src', # A convenience field that points to whatever firmware is configured (see 'use-bbl' to determine which it is) + 'use-parent-bin', # Child would build the exact same binary as the parent, just copy it instead of rebuilding. ] # These are the user-defined options that should be converted to absolute # paths (from workload-relative). Derived options are already absolute. -configToAbs = ['overlay', 'linux-src', 'pk-src', 'cfg-file', 'bin', 'img', 'spike', 'qemu'] +configToAbs = ['overlay', 'bbl-src', 'cfg-file', 'bin', 'img', 'spike', 'qemu'] + # These are the options that should be inherited from base configs (if not # explicitly provided). Additional options may also be inherited if they require @@ -97,8 +106,10 @@ 'runSpec', 'files', 'outputs', - 'linux-src', - 'pk-src', + 'bbl-src', + 'bbl-build-args', + 'opensbi-src', + 'opensbi-build-args', 'builder', 'distro', 'spike', @@ -112,6 +123,7 @@ 'cpus', 'mem'] + # These are the permissible base-distributions to use (they get treated special) distros = { 'fedora' : fed.Builder(), @@ -128,6 +140,22 @@ 'cpus' : 4 # same as firesim default target } +# Members of the 'linux' option in the config +configLinux = [ + "source", # Path to linux source code to use + "config", # Path to kfrag to apply over bases + "modules" # Dictionary of kernel modules to build and load {MODULE_NAME : PATH_TO_MODULE} + ] + +# Members of the 'firmware' option +configFirmware = [ + "use-bbl", # Use bbl as firmware instead of openSBI + "bbl-src", # Alternative source directory for bbl + "bbl-build-args", # Additional arguments to configure script for bbl. User provides string, cannonical form is list. + "opensbi-src", # Alternative source directory for openSBI + "opensbi-build-args", # Additional arguments to make for openSBI. User provides string, cannonical form is list. + ] + class RunSpec(): def __init__(self, script=None, command=None, args=[]): """RunSpec represents a command or script to run in the target. 
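For reference, a minimal sketch of the grouped 'linux' and 'firmware' options as a user might supply them in a workload config. The field names follow configLinux/configFirmware above; the workload name, paths, module name, and build argument are illustrative and not taken from this patch.

# illustrative workload config, expressed as the Python dict the parser works with
exampleCfg = {
    'name': 'example-workload',                  # hypothetical workload
    'linux': {
        'source': 'linux',                       # path to a Linux source tree (workdir-relative)
        'config': ['linux-config/extra.kfrag'],  # kfrag list; a scalar string is also accepted
        'modules': {'mymod': 'kmods/mymod'},     # {MODULE_NAME : PATH_TO_MODULE}
    },
    'firmware': {
        'use-bbl': False,                        # OpenSBI is the default; set True to build BBL instead
        'opensbi-build-args': 'FW_OPTIONS=0x2',  # user provides a string; init splits it into a list
    },
}

The deprecated flat options ('linux-src', 'linux-config', 'pk-src') are still accepted but are rewritten into this grouped form early in loading (translateDeprecated below), and all paths are later made absolute against the workload's workdir.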
@@ -158,8 +186,8 @@ def fromString(self, command, baseDir=pathlib.Path('.')): def __repr__(self): return "RunSpec(" + \ - ' path: ' + self.path + \ - ', command: ' + self.command + \ + ' path: ' + str(self.path) + \ + ', command: ' + str(self.command) + \ ', args ' + str(self.args) + \ ')' @@ -181,6 +209,109 @@ def cleanPath(path, workdir): return path +def translateDeprecated(config): + """Replace all deprecated options with their more current equivalents. This + function has no dependencies on prior parsing of configs, it can run + against the raw config dict from the user. After this, there will be only + one cannonical representation for each option and deprecated options will + not be present in the config.""" + + # linux stuff + # Handle deprecated standalone linux-config and linux-src options (they now live entirely in the linux dict) + if 'linux' not in config: + if 'linux-config' in config or 'linux-src' in config: + config['linux'] = {} + if 'linux-src' in config: + config['linux']['source'] = config['linux-src'] + if 'linux-config' in config: + config['linux']['config'] = config['linux-config'] + elif 'linux-config' in config or 'linux-src' in config: + log.warning("The deprecated 'linux-config' and 'linux-src' options are mutually exclusive with the 'linux' option; ignoring") + + # Firmware stuff + if 'pk-src' in config: + if 'firmware' not in config: + config['firmware'] = {'bbl-src' : config['pk-src']} + else: + log.warning("The deprecated 'pk-src' option is mutually exclusive with the 'firmware' option; ignoring") + + # Now that they're translated, remove all deprecated options from config + for opt in configDeprecated: + config.pop(opt, None) + + +def initLinuxOpts(config): + """Initialize the 'linux' option group of config""" + if 'linux' not in config: + return + + if 'config' in config['linux']: + if isinstance(config['linux']['config'], list): + config['linux']['config'] = [ cleanPath(p, config['workdir']) for p in config['linux']['config'] ] + else: + config['linux']['config'] = [ cleanPath(config['linux']['config'], config['workdir']) ] + + if 'source' in config['linux']: + config['linux']['source'] = cleanPath(config['linux']['source'], config['workdir']) + + if 'modules' in config['linux']: + config['linux']['modules'] = { name : cleanPath(path, config['workdir']) for name, path in config['linux']['modules'].items() } + + +def inheritLinuxOpts(config, baseCfg): + """Apply the linux options from baseCfg to config. This also finalizes + the linux config, including applying defaults (which can't be applied + until we've inherited).""" + + if 'linux' not in config and 'linux' in baseCfg: + config['linux'] = copy.deepcopy(baseCfg['linux']) + elif 'linux' in config and 'linux' in baseCfg: + # both have a 'linux' option, handle inheritance for each suboption + if 'config' in baseCfg['linux'] and 'config' in config['linux']: + # Order matters here! Later kfrags take precedence over earlier. 
+ config['linux']['config'] = baseCfg['linux']['config'] + config['linux']['config'] + + if 'modules' in baseCfg['linux'] and 'modules' in config['linux']: + config['linux']['modules'] = {**baseCfg['linux']['modules'], **config['linux']['modules']} + + for k, v in baseCfg['linux'].items(): + if k not in config['linux']: + config['linux'][k] = copy.copy(v) + + +def initFirmwareOpts(config): + """Initialize the 'firmware' option group""" + if 'firmware' not in config: + return + + for opt in ['bbl-src', 'opensbi-src']: + if opt in config['firmware']: + config['firmware'][opt] = cleanPath(config['firmware'][opt], config['workdir']) + + for opt in ['bbl-build-args', 'opensbi-build-args']: + if opt in config['firmware']: + config['firmware'][opt] = config['firmware'][opt].split() + + +def inheritFirmwareOpts(config, baseCfg): + """Apply the firmware options from baseCfg to config.""" + + if 'firmware' not in config and 'firmware' in baseCfg: + config['firmware'] = copy.deepcopy(baseCfg['firmware']) + elif 'firmware' in config and 'firmware' in baseCfg: + for k, v in baseCfg['firmware'].items(): + if k not in config['firmware']: + config['firmware'][k] = copy.copy(v) + elif k in ['bbl-build-args', 'opensbi-build-args']: + config['firmware'][k] = baseCfg['firmware'][k] + config['firmware'][k] + + if 'firmware' in config: + if config['firmware'].get('use-bbl', False): + config['firmware']['source'] = config['firmware']['bbl-src'] + else: + config['firmware']['source'] = config['firmware']['opensbi-src'] + + class Config(collections.MutableMapping): # Configs are assumed to be partially initialized until this is explicitly # set. @@ -195,7 +326,9 @@ class Config(collections.MutableMapping): # Post: # - All paths will be absolute # - Jobs will be a dictionary of { 'name' : Config } for each job + # - All options will be in the cannonical form or not in the dictionary if undefined def __init__(self, cfgFile=None, cfgDict=None): + if cfgFile != None: with open(cfgFile, 'r') as f: self.cfg = json.load(f) @@ -203,13 +336,12 @@ def __init__(self, cfgFile=None, cfgDict=None): else: self.cfg = cfgDict + translateDeprecated(self.cfg) + cfgDir = None if 'cfg-file' in self.cfg: cfgDir = self.cfg['cfg-file'].parent - # Some default values - self.cfg['base-deps'] = [] - if 'workdir' in self.cfg: self.cfg['workdir'] = pathlib.Path(self.cfg['workdir']) if not self.cfg['workdir'].is_absolute(): @@ -219,20 +351,21 @@ def __init__(self, cfgFile=None, cfgDict=None): assert('cfg-file' in self.cfg), "No workdir or cfg-file provided" self.cfg['workdir'] = cfgDir / self.cfg['name'] - if 'nodisk' not in self.cfg: - # Note that sw_manager may set this back to true if the user passes command line options - self.cfg['nodisk'] = False - # Convert stuff to absolute paths (this should happen as early as # possible because the next steps all assume absolute paths) for k in (set(configToAbs) & set(self.cfg.keys())): self.cfg[k] = cleanPath(self.cfg[k], self.cfg['workdir']) - if 'linux-config' in self.cfg: - if isinstance(self.cfg['linux-config'], list): - self.cfg['linux-config'] = [ cleanPath(p, self.cfg['workdir']) for p in self.cfg['linux-config'] ] - else: - self.cfg['linux-config'] = [ cleanPath(self.cfg['linux-config'], self.cfg['workdir']) ] + initLinuxOpts(self.cfg) + initFirmwareOpts(self.cfg) + + # Some default values + self.cfg['base-deps'] = [] + self.cfg['use-parent-bin'] = False + + if 'nodisk' not in self.cfg: + # Note that sw_manager may set this back to true if the user passes command line options + 
self.cfg['nodisk'] = False if 'rootfs-size' in self.cfg: self.cfg['img-sz'] = hf.parse_size(str(self.cfg['rootfs-size'])) @@ -247,13 +380,6 @@ def __init__(self, cfgFile=None, cfgDict=None): self.cfg['files'] = fList - # Convert overlay to file list. Internal code can safely ignore the 'overlay' argument now. - if 'overlay' in self.cfg: - self.cfg.setdefault('files', []) - files = self.cfg['overlay'].glob('*') - for f in files: - self.cfg['files'].append(FileSpec(src=f, dst=pathlib.Path('/'))) - if 'outputs' in self.cfg: self.cfg['outputs'] = [ pathlib.Path(f) for f in self.cfg['outputs'] ] @@ -302,6 +428,7 @@ def __init__(self, cfgFile=None, cfgDict=None): self.cfg['jobs'][jCfg['name']] = Config(cfgDict=jCfg) + # Finalize this config using baseCfg (which is assumed to be fully # initialized). def applyBase(self, baseCfg): @@ -316,27 +443,40 @@ def applyBase(self, baseCfg): self.cfg['base-deps'].append(str(self.cfg['base-img'])) self.cfg['img'] = getOpt('image-dir') / (self.cfg['name'] + ".img") - if 'host-init' in baseCfg: - self.cfg['base-deps'].append(str(baseCfg['host-init'])) + if 'bin' in baseCfg: + self.cfg['base-bin'] = baseCfg['bin'] - if 'linux-src' not in self.cfg: - self.cfg['linux-src'] = getOpt('linux-dir') + if 'dwarf' in baseCfg: + self.cfg['base-dwarf'] = baseCfg['dwarf'] - if 'linux-config' in baseCfg: - if 'linux-config' not in self.cfg: - self.cfg['linux-config'] = [] - # Order matters here! Later kfrags take precedence over earlier. - self.cfg['linux-config'] = baseCfg['linux-config'] + self.cfg['linux-config'] + if 'host-init' in baseCfg: + self.cfg['base-deps'].append(str(baseCfg['host-init'])) - if 'pk-src' not in self.cfg: - self.cfg['pk-src'] = getOpt('pk-dir') + inheritLinuxOpts(self.cfg, baseCfg) + inheritFirmwareOpts(self.cfg, baseCfg) - # We inherit the parent's binary for bare-metal configs, but not linux configs - # XXX This probably needs to be re-thought out. It's needed at least for including bare-metal binaries as a base for a job. - if 'linux-config' in self.cfg or 'bin' not in self.cfg: + if 'linux' in self.cfg or 'bin' not in self.cfg: + # Linux workloads get their own binary, whether from scratch or a + # copy of their parent's self.cfg['bin'] = getOpt('image-dir') / (self.cfg['name'] + "-bin") self.cfg['dwarf'] = getOpt('image-dir') / (self.cfg['name'] + "-bin-dwarf") + # To avoid needlessly recompiling kernels, we check if the child has + # the exact same binary-related configuration. If 'use-parent-bin' + # is set, buildBin will simply copy the parent's binary rather than + # compiling it from scratch. 
+ self.cfg['use-parent-bin'] = True + for opt in ['firmware', 'linux', 'host-init']: + if opt not in self.cfg: + # Child doesn't overwrite a non-heritable option + continue + elif self.cfg.get(opt, None) != baseCfg.get(opt, None): + self.cfg['use-parent-bin'] = False + else: + # bare-metal workloads use the parent's binary directly rather than + # copying it like a Linux workload would + self.cfg['use-parent-bin'] = False + # Some defaults need to occur, even if you don't have a base if 'launch' not in self.cfg: self.cfg['launch'] = True @@ -345,6 +485,7 @@ def applyBase(self, baseCfg): self.cfg['run'] = getOpt('wlutil-dir') / 'null_run.sh' self.cfg['runSpec'] = RunSpec(script=self.cfg['run']) + # The following methods are needed by MutableMapping def __getitem__(self, key): return self.cfg[key] @@ -421,6 +562,7 @@ def __init__(self, dirs=None, paths=None): for f in list(self.cfgs.keys()): try: self._initializeFromBase(self.cfgs[f]) + except KeyError as e: log.warning("Skipping " + str(f) + ":") log.warning("\tMissing required option '" + e.args[0] + "'") diff --git a/wlutil/default-config.yaml b/wlutil/default-config.yaml index 839fb797..5421d542 100644 --- a/wlutil/default-config.yaml +++ b/wlutil/default-config.yaml @@ -9,10 +9,10 @@ board-dir : '../boards/firechip' image-dir : '../images' # Default linux source -linux-dir : '../riscv-linux' - -# Default pk source -pk-dir : '../riscv-pk' +# linux-dir : '../riscv-linux' +# +# # Default pk source +# pk-dir : '../boards/default/firmware/opensbi' # Runtime Logs log-dir : '../logs' diff --git a/wlutil/initramfs/disk/init b/wlutil/initramfs/disk/init index 2da839ab..d4f2d1b5 100755 --- a/wlutil/initramfs/disk/init +++ b/wlutil/initramfs/disk/init @@ -4,7 +4,7 @@ _setup_rootfs() { for blk in iceblk vda ; do if test -b "/dev/${blk}" ; then echo "Mounting /dev/${blk} as root device" - mount -o ro "/dev/${blk}" /mnt/root + mount -o rw "/dev/${blk}" /mnt/root return fi done @@ -29,6 +29,9 @@ _setup() ( return 1 fi + # Let users see interact with modules after boot. + cp -r /lib/modules /mnt/root/lib/ + umount /proc umount /sys # buildroot inittab(5) does not mount /dev itself diff --git a/wlutil/test.py b/wlutil/test.py index a01eeea2..5bc8889f 100755 --- a/wlutil/test.py +++ b/wlutil/test.py @@ -13,6 +13,8 @@ import textwrap import psutil from enum import Enum +import signal + from .wlutil import * from .build import * from .launch import * @@ -23,6 +25,56 @@ defBuildTimeout = 2400 defRunTimeout = 2400 +class TestFailure(Exception): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + + +# Fedora run output can be tricky to compare due to lots of non-deterministic +# output (e.g. 
timestamps, pids) This function takes the entire uartlog from a +# fedora run and returns only the output of auto-run scripts +def stripFedoraUart(lines): + stripped = [] + pat = re.compile(".*firesim.sh\[\d*\]: (.*\n)") + for l in lines: + match = pat.match(l) + if match: + stripped.append(match.group(1)) + + return stripped + + +def stripBrUart(lines): + stripped = [] + inBody = False + for l in lines: + if not inBody: + if re.match("launching firesim workload run/command", l): + inBody = True + else: + if re.match("firesim workload run/command done", l): + break + stripped.append(l) + + return stripped + + +def stripUartlog(config, uartlog): + if 'distro' in config: + if config['distro'] == 'fedora': + strippedUart = stripFedoraUart(uartlog) + elif config['distro'] == 'br': + strippedUart = stripBrUart(uartlog) + else: + strippedUart = uartlog + else: + strippedUart = uartlog + + return strippedUart + # Compares two runOutput directories. Returns None if they match or a message # describing the difference if they don't. # - Directory structures are compared directly (same folders in the same @@ -33,7 +85,7 @@ # - Files named "uartlog" in the reference output need only match a subset of # the test output (the entire reference uartlog contents must exist somewhere # in the test output). -def cmpOutput(testDir, refDir, strip=False): +def cmpOutput(config, testDir, refDir, strip=False): testDir = pl.Path(testDir) refDir = pl.Path(refDir) if not refDir.exists(): @@ -48,11 +100,19 @@ def cmpOutput(testDir, refDir, strip=False): if rPath.is_file(): # Regular file, should match exactly with open(str(rPath), 'r') as rFile: - with open(str(tPath), 'r') as tFile: + with open(str(tPath), 'r', newline="\n") as tFile: if rPath.name == "uartlog": rLines = rFile.readlines() tLines = tFile.readlines() + # Some configurations spit out a bunch of spurious \r\n + # (^M in vim) characters. This strips them so that + # users can type reference outputs using normal + # newlines. + tLines = [ line.replace("\r", "") for line in tLines] + if strip: + tLines = stripUartlog(config, tLines) + matcher = difflib.SequenceMatcher(None, rLines, tLines) m = matcher.find_longest_match(0, len(rLines), 0, len(tLines)) if m.size != len(rLines): @@ -71,68 +131,21 @@ def cmpOutput(testDir, refDir, strip=False): return None -def runTimeout(func, timeout): - def wrap(*args, **kwargs): - p = mp.Process(target=func, args=args, kwargs=kwargs) - p.start() - p.join(timeout) - if p.is_alive(): - # Kill all subprocesses (e.g. qemu) - for child in psutil.Process(p.pid).children(recursive=True): - child.kill() - p.terminate() - p.join() - raise TimeoutError(func.__name__) - elif p.exitcode != 0: - raise ChildProcessError(func.__name__) - - return wrap -# Fedora run output can be tricky to compare due to lots of non-deterministic -# output (e.g. 
timestamps, pids) This function takes the entire uartlog from a -# fedora run and returns only the output of auto-run scripts -def stripFedoraUart(lines): - stripped = "" - pat = re.compile(".*firesim.sh\[\d*\]: (.*\n)") - for l in lines: - match = pat.match(l) - if match: - stripped += match.group(1) +@contextmanager +def timeout(seconds, label): + """Raises TimeoutError if the block takes longer than 'seconds' (an integer)""" + def timeoutHandler(signum, fname): + raise TimeoutError(label) - return stripped - -def stripBrUart(lines): - stripped = "" - inBody = False - for l in lines: - if not inBody: - if re.match("launching firesim workload run/command", l): - inBody = True - else: - if re.match("firesim workload run/command done", l): - break - stripped += l - - return stripped - -def stripUartlog(config, outputPath): - outDir = pathlib.Path(outputPath) - for uartPath in outDir.glob("**/uartlog"): - with open(str(uartPath), 'r', errors='ignore') as uFile: - uartlog = uFile.readlines() - - if 'distro' in config: - if config['distro'] == 'fedora': - strippedUart = stripFedoraUart(uartlog) - elif config['distro'] == 'br': - strippedUart = stripBrUart(uartlog) - else: - strippedUart = "".join(uartlog) - else: - strippedUart = "".join(uartlog) + oldSignal = signal.signal(signal.SIGALRM, timeoutHandler) + signal.alarm(seconds) + try: + yield + finally: + signal.signal(signal.SIGALRM, oldSignal) + signal.alarm(0) - with open(str(uartPath), 'w') as uFile: - uFile.write(strippedUart) # Build and run a workload and compare results against the testing spec # ('testing' field in config) @@ -175,22 +188,30 @@ def testWorkload(cfgName, cfgs, verbose=False, spike=False, cmp_only=None): if cmp_only is None: # Build workload log.info("Building test workload") - runTimeout(buildWorkload, testCfg['buildTimeout'])(cfgName, cfgs) + ret = 0 + with timeout(testCfg['buildTimeout'], 'build'): + res = buildWorkload(cfgName, cfgs) + + if res != 0: + raise TestFailure("Failure when building workload " + cfgName) # Run every job (or just the workload itself if no jobs) if 'jobs' in cfg: for jName in cfg['jobs'].keys(): log.info("Running job " + jName) - runTimeout(launchWorkload, testCfg['runTimeout'])(cfg, job=jName, spike=spike, interactive=verbose) + with timeout(testCfg['runTimeout'], 'launch job' + jName): + launchWorkload(cfg, job=jName, spike=spike, interactive=verbose) else: log.info("Running workload") - runTimeout(launchWorkload, testCfg['runTimeout'])(cfg, spike=spike, interactive=verbose) + with timeout(testCfg['runTimeout'], 'launch'): + launchWorkload(cfg, spike=spike, interactive=verbose) log.info("Testing outputs") + strip = False if 'strip' in testCfg and testCfg['strip']: - stripUartlog(cfg, testPath) + strip = True - diff = cmpOutput(testPath, refPath) + diff = cmpOutput(cfg, testPath, refPath, strip=strip) if diff is not None: suitePass = False log.info("Test " + cfgName + " failure: output does not match reference") @@ -208,14 +229,9 @@ def testWorkload(cfgName, cfgs, verbose=False, spike=False, cmp_only=None): return testResult.failure, testPath - except ChildProcessError as e: + except TestFailure as e: suitePass = False - if e.args[0] == "buildWorkload": - log.info("Test " + cfgName + " failure: Exception while building") - elif e.args[0] == "launchWorkload": - log.info("Test " + cfgName + " failure: Exception while running") - else: - log.error("Internal tester error: exception in unknown function: " + e.args[0]) + log.info(e.msg) return testResult.failure, testPath @@ -226,20 +242,3 @@ 
def testWorkload(cfgName, cfgs, verbose=False, spike=False, cmp_only=None): return testResult.failure, testPath return testResult.success, testPath - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Check the outupt of a workload against a reference output. The reference directory should match the layout of test directory including any jobs, uartlogs, or file outputs. Reference uartlogs can be a subset of the full output (this will check only that the reference uartlog content exists somewhere in the test uartlog).") - parser.add_argument("testDir", help="Run output directory to test.") - parser.add_argument("refDir", help="Reference output directory.") - - args = parser.parse_args() - res = cmpOutput(args.testDir, args.refDir) - if res is not None: - print("Failure:") - print(res) - sys.exit(os.EX_DATAERR) - else: - print("Success") - sys.exit(os.EX_OK) - - diff --git a/wlutil/wlutil.py b/wlutil/wlutil.py index d7d2eba0..1ce76233 100644 --- a/wlutil/wlutil.py +++ b/wlutil/wlutil.py @@ -18,6 +18,7 @@ import yaml import re import pprint +import doit # Useful for defining lists of files (e.g. 'files' part of config) FileSpec = collections.namedtuple('FileSpec', [ 'src', 'dst' ]) @@ -28,7 +29,8 @@ # List of marshal submodules (those enabled by init-submodules.sh) marshalSubmods = [ 'linux-dir', - 'pk-dir', + 'bbl-dir', + 'opensbi-dir', 'busybox-dir', 'buildroot-dir', 'driver-dirs' @@ -100,7 +102,8 @@ def cleanPaths(opts, baseDir=pathlib.Path('.')): 'image-dir', 'linux-dir', 'firesim-dir', - 'pk-dir', + 'bbl-dir', + 'opensbi-dir', 'log-dir', 'res-dir', 'workload-dirs' @@ -131,9 +134,7 @@ def clean(path): 'workload-dirs', 'board-dir', 'image-dir', - 'linux-dir', 'firesim-dir', - 'pk-dir', 'log-dir', 'res-dir', 'jlevel', # int or str from user, converted to '-jN' after loading @@ -174,6 +175,15 @@ def clean(path): # List of paths to linux driver sources to use 'driver-dirs', + # Linux source to use by default (can be overwritten by user config). Derived from board-dir. + 'linux-dir', + + # Default pk/bbl source to use by default (can be overwritten by user config). Derived from board-dir. + 'bbl-dir', + + # Default OpenSBI source to use by default (can be overwritten by user config). Derived from board-dir. + 'opensbi-dir', + # Buildroot source directory 'buildroot-dir', @@ -290,7 +300,12 @@ def deriveOpts(self): self['run-name'] = "" self['rootfs-margin'] = humanfriendly.parse_size(str(self['rootfs-margin'])) self['jlevel'] = '-j' + str(self['jlevel']) + self['driver-dirs'] = list(self['board-dir'].glob('drivers/*')) + self['bbl-dir'] = self['board-dir'] / 'firmware' / 'riscv-pk' + self['opensbi-dir'] = self['board-dir'] / 'firmware' / 'opensbi' + self['linux-dir'] = self['board-dir'] / 'linux' + self['buildroot-dir'] = self['wlutil-dir'] / 'br' / 'buildroot' self['linux-make-args'] = ["ARCH=riscv", "CROSS_COMPILE=riscv64-unknown-linux-gnu-"] @@ -377,11 +392,13 @@ def getOpt(opt): else: return ctx[opt] -# logging setup: You can call this multiple times to reset logging (e.g. if you -# change the RunName) fileHandler = None consoleHandler = None -def initLogging(verbose): +def initLogging(verbose, logPath=None): + """logging setup: You can call this multiple times to reset logging (e.g. if you + change the RunName). If 'logPath' is set, that path will be used. 
+ Otherwise the name will be derived from the current configuration.""" + global fileHandler global consoleHandler @@ -389,7 +406,8 @@ def initLogging(verbose): rootLogger.setLevel(logging.NOTSET) # capture everything # Create a unique log name - logPath = getOpt('log-dir') / (getOpt('run-name') + '.log') + if logPath is None: + logPath = getOpt('log-dir') / (getOpt('run-name') + '.log') # formatting for log to file if fileHandler is not None: @@ -416,7 +434,7 @@ def initLogging(verbose): # Run subcommands and handle logging etc. # The arguments are identical to those for subprocess.call() # level - The logging level to use -# check - Throw an error on non-zero return status? +# check - Throw an error on non-zero return status def run(*args, level=logging.DEBUG, check=True, **kwargs): log = logging.getLogger() @@ -497,16 +515,7 @@ def toCpio(src, dst): stderr=sp.PIPE, stdout=outCpio, cwd=src) log.debug(p.stderr.decode('utf-8')) -# Apply the overlay directory "overlay" to the filesystem image "img" -# Note that all paths must be absolute -def applyOverlay(img, overlay): - log = logging.getLogger() - flist = [] - for f in overlay.glob('*'): - flist.append(FileSpec(src=f, dst=pathlib.Path('/'))) - copyImgFiles(img, flist, 'in') - def resizeFS(img, newSize=0): """Resize the rootfs at img to newSize. @@ -538,6 +547,7 @@ def resizeFS(img, newSize=0): run(['resize2fs', str(img)]) return + def copyImgFiles(img, files, direction): """Copies a list of type FileSpec ('files') to/from the destination image (img). @@ -559,6 +569,17 @@ def copyImgFiles(img, files, direction): else: raise ValueError("direction option must be either 'in' or 'out'") + +def applyOverlay(img, overlay): + """Apply the overlay directory "overlay" to the filesystem image "img" + Note that all paths must be absolute""" + flist = [] + for f in overlay.glob('*'): + flist.append(FileSpec(src=f, dst=pathlib.Path('/'))) + + copyImgFiles(img, flist, 'in') + + _toolVersions = None def getToolVersions(): """Detect version information for the currently enabled toolchain.""" @@ -649,6 +670,7 @@ def checkGitStatus(submodule): return status + def checkSubmodule(s): """Check whether a submodule is present and initialized. @@ -663,10 +685,56 @@ def checkSubmodule(s): if not s.exists() or not any(os.scandir(s)): raise SubmoduleError(s) -# The doit.tools.config_changed helper doesn't support multiple invocations in -# a single uptodate. I fix that bug here, otherwise it's a direct copy from their + +class WithMetadataChecker(doit.dependency.MD5Checker): + """This checker is similar to the default MD5Checker, but it includes file + metadata including mode and user/group ids. 
+ see https://github.com/pydoit/doit/blob/0.31.1/doit/dependency.py for details + """ + + @staticmethod + def extract_stat(stat): + return (stat.st_mode, stat.st_uid, stat.st_gid) + + def check_modified(self, file_path, file_stat, state): + state = tuple(state) + meta = self.extract_stat(file_stat) + if meta != state[3:]: + return True + else: + return super().check_modified(file_path, file_stat, state[:3]) + + def get_state(self, dep, current_state): + stat = self.extract_stat(os.stat(dep)) + if current_state is None: + md5State = super().get_state(dep, None) + metaState = stat + else: + current_state = tuple(current_state) + md5State = super().get_state(dep, current_state[0:3]) + if stat == current_state[3:]: + metaState = None + else: + metaState = stat + + # Merge the two state sources + if md5State is None and metaState is None: + return None + else: + if md5State is None: + md5State = current_state[0:3] + elif metaState is None: + metaState = stat + + return md5State + stat + +# The doit.tools.config_changed helper has a few limitations: +# - doesn't support multiple invocations in a single uptodate. +# - It is not JSON serializable which means you can't use it as a calc_dep +# (doit saves calc_dep to the DB). Fixed by subclassing dict. +# I fix these here, otherwise it's a direct copy from their # code. See https://github.com/pydoit/doit/issues/333. -class config_changed(object): +class config_changed(dict): """check if passed config was modified @var config (str) or (dict) @var encoder (json.JSONEncoder) Encoder used to convert non-default values. @@ -675,6 +743,7 @@ def __init__(self, config, encoder=None): self.config = config self.config_digest = None self.encoder = encoder + dict.__init__(self) def _calc_digest(self): if isinstance(self.config, str):
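To make the metadata-aware checker concrete, here is a self-contained sketch of the per-file state it effectively tracks: the content-based state is extended with (st_mode, st_uid, st_gid), so a chmod or chown alone marks a dependency out of date even when the hash is unchanged. The helper names and the exact layout of the content portion are illustrative; only the appended metadata triple mirrors extract_stat above.

import hashlib
import os

def file_state(path):
    """Illustrative per-file state: a content portion plus (mode, uid, gid)."""
    st = os.stat(path)
    with open(path, 'rb') as f:
        md5 = hashlib.md5(f.read()).hexdigest()
    return (st.st_mtime, st.st_size, md5, st.st_mode, st.st_uid, st.st_gid)

def is_modified(path, saved):
    """Changed if either the content hash or the metadata differs from the saved state."""
    new = file_state(path)
    return new[2] != saved[2] or new[3:] != tuple(saved)[3:]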
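The build-graph changes above lean on doit's calc_dep feature: a helper task computes file_dep at run time, so files generated by host-init are still picked up, and the real task references the helper instead of listing files statically. A standalone sketch of the pattern, assuming doit is installed; the task names and the overlay path are illustrative rather than taken from marshal.

# dodo.py -- run with `doit build_image`
import pathlib

def calc_overlay_deps():
    """Runs just before 'build_image'; whatever exists under overlay/ at that
    point becomes a file_dep, with symlinks excluded as in fileDepsTask above."""
    overlay = pathlib.Path('overlay')   # hypothetical overlay directory
    files = [p for p in overlay.glob('**/*') if p.is_file() and not p.is_symlink()]
    return {'file_dep': [str(p) for p in files]}

def task_calc_overlay_deps():
    return {'actions': [calc_overlay_deps]}

def task_build_image():
    return {
        'actions': ['echo building image'],
        'calc_dep': ['calc_overlay_deps'],   # dependencies are resolved at run time
    }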
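makeOpenSBI above computes FW_PAYLOAD_FDT_ADDR by rounding the kernel Image size up to the next MiB and adding it to FW_TEXT_START + FW_PAYLOAD_OFFSET, so the device tree lands past the end of the payload. A small worked example of that rounding; the sizes are made up.

def align_up_mib(size):
    """Round 'size' up to the next 1 MiB (0x100000) boundary."""
    return ((size + 0xfffff) // 0x100000) * 0x100000

assert align_up_mib(0x100000) == 0x100000    # already aligned: unchanged
assert align_up_mib(0x100001) == 0x200000    # one byte over: next MiB boundary
assert align_up_mib(0x12a3b40) == 0x1300000  # e.g. an ~18.6 MiB Image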