diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 4db43f45fe..c8c33a2924 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -1,70 +1,49 @@ --- -name: macos-latest +name: macOS on: [push, pull_request] jobs: - compile: # name of the job. Jobs run in parallel unless specified otherwise. - name: compile - runs-on: macos-latest - strategy: - matrix: - python-version: ['3.6'] - build-version: ["release", "debug"] - steps: # each - is a new sequentially run step - - uses: actions/checkout@master - - name: Setup python - uses: actions/setup-python@v1 + test: + name: Compile and test planner + timeout-minutes: 60 + runs-on: macos-10.15 + steps: + - name: Clone repository + uses: actions/checkout@master + + - name: Install Python + uses: actions/setup-python@master with: - python-version: ${{ matrix.python-version }} - architecture: x64 + python-version: 3.6 - - name: compile + - name: Compile planner run: | - export CXXFLAGS="-Werror" # Treat compilation warnings as errors. - ./build.py ${{ matrix.build-version }} + export CXXFLAGS="-Werror" # Treat compilation warnings as errors. + ./build.py + ./build.py --debug - - name: upload-planner - uses: actions/upload-artifact@master - with: - name: ${{ matrix.build-version }} - path: builds/${{ matrix.build-version }} - - - test: # name of the job. Jobs run in parallel unless specified otherwise. - name: test - runs-on: macos-latest - needs: compile - strategy: - matrix: - python-version: ['3.6'] - steps: # each - is a new sequentially run step - - uses: actions/checkout@master - - - name: Setup python - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - architecture: x64 - - - name: setup + - name: Install tox run: | - pip3 install pytest tox - #brew install valgrind # TODO: does not work - mkdir builds - - - name: download-planner - uses: actions/download-artifact@master - with: - # without 'name' attribute all artifacts are downloaded and the - # artifact name is used as directory name. - path: builds/ + pip3 install tox - - name: test + - name: Install VAL + run: | + brew install gnu-sed + git clone https://github.com/KCL-Planning/VAL.git + cd VAL + git checkout a5565396007eee73ac36527fbf904142b3077c74 + make clean # Remove old build artifacts and binaries. + gsed -i 's/-Werror //g' Makefile # Ignore warnings. + make -j2 + mv validate ../ + cd ../ + rm -rf VAL + + - name: Run driver, translator and search tests run: | - chmod +x builds/debug/bin/downward - chmod +x builds/release/bin/downward - cd misc/ - tox -e search,translator + export PATH="$(pwd):$PATH" # Add VAL to path. + cd misc + tox -e driver,translator,search ... diff --git a/.github/workflows/misc/cplex129_windows_installer.properties b/.github/workflows/misc/cplex129_windows_installer.properties new file mode 100644 index 0000000000..71b40e8b4c --- /dev/null +++ b/.github/workflows/misc/cplex129_windows_installer.properties @@ -0,0 +1,58 @@ +# Thu Feb 11 12:01:26 CET 2021 +# Replay feature output +# --------------------- +# This file was built by the Replay feature of InstallAnywhere. +# It contains variables that were set by Panels, Consoles or Custom Code. +# +# To generate this file: +# 1. Install CPLEX from the command line: ./installer -r installer.properties +# 2. Adapt the directory paths in the resulting 'installer.properties' file +# 3. 
Add to the file:
+#    #Silent Installation
+#    INSTALLER_UI=silent
+# For further information visit:
+# https://www.ibm.com/support/knowledgecenter/SSSA5P_12.9.0/ilog.odms.studio.help/Optimization_Studio/topics/td_silent_install.html
+
+
+#Accept license agreement
+#---------------------------------------------------
+LICENSE_ACCEPTED=TRUE
+
+#Choose installation directory
+#-----------------------------
+USER_INSTALL_DIR=D:\\a\\downward\\cplex_temp
+
+#Copy examples
+#-------------------
+CPLEX_STUDIO_EXAMPLES_DIR=D:\\a\\downward\\cplex_examples
+CPLEX_STUDIO_SAMPLE_COPY_NOT_ACTIVATED=0
+
+#Associate files with CPLEX
+#--------------
+CPLEX_STUDIO_FILE_ASSOCIATION=0
+
+#Update PATH variable
+#--------------------
+CPLEX_STUDIO_PATH_UPDATE=1
+
+#Silent Installation
+INSTALLER_UI=silent
+
+#Install
+#------------
+-fileOverwrite_D\:\\a\\downward\\cplex_temp\\README.html=Yes
+-fileOverwrite_D\:\\a\\downward\\cplex_temp\\Uninstall\\Uninstall.lax=Yes
+-fileOverwrite_D\:\\a\\downward\\cplex_temp\\Uninstall\\resource\\iawin64_x64.dll=Yes
+-fileOverwrite_D\:\\a\\downward\\cplex_temp\\Uninstall\\resource\\iawin32.dll=Yes
+-fileOverwrite_D\:\\a\\downward\\cplex_temp\\Uninstall\\resource\\win64_32_x64.exe=Yes
+-fileOverwrite_D\:\\a\\downward\\cplex_temp\\Uninstall\\resource\\remove.exe=Yes
+-fileOverwrite_D\:\\a\\downward\\cplex_temp\\Uninstall\\resource\\invoker.exe=Yes
+-fileOverwrite_D\:\\a\\downward\\cplex_temp\\Uninstall\\ibm_uninsticon.ico=Yes
+-fileOverwrite_D\:\\a\\downward\\cplex_temp\\opl\\oplide\\oplide_installer.bat=Yes
+-fileOverwrite_D\:\\a\\downward\\cplex_temp\\opl\\oplide\\oplide.exe=Yes
+-fileOverwrite_D\:\\a\\downward\\cplex_examples\\.samples=Yes
+
+#Post installation steps
+#-------------------------------
+CPLEX_STUDIO_README=0
+CPLEX_STUDIO_IDE=0
diff --git a/.github/workflows/misc/set-visual-studio-static-runtime-libraries.py b/.github/workflows/misc/set-visual-studio-static-runtime-libraries.py
new file mode 100644
index 0000000000..1c40611d04
--- /dev/null
+++ b/.github/workflows/misc/set-visual-studio-static-runtime-libraries.py
@@ -0,0 +1,54 @@
+"""
+Use this script to set the "RuntimeLibrary" property of a Visual Studio project
+file to statically linked runtime libraries for all configurations.
+""" +from xml.etree import ElementTree +import os +import sys + +ElementTree.register_namespace( + "", "http://schemas.microsoft.com/developer/msbuild/2003") + + +def adapt(in_project, out_project): + tree = ElementTree.parse(in_project) + + root = tree.getroot() + for item_definition_group in root.findall( + '{http://schemas.microsoft.com/developer/msbuild/2003}ItemDefinitionGroup'): + condition = item_definition_group.attrib["Condition"] + is_release = any(x in condition.lower() + for x in ["release", "minsizerel", "relwithdebinfo"]) + is_debug = "debug" in condition.lower() + assert is_release ^ is_debug, condition + + compiler_args = item_definition_group.findall( + '{http://schemas.microsoft.com/developer/msbuild/2003}ClCompile') + assert len(compiler_args) == 1, compiler_args + compiler_args = compiler_args[0] + + runtime_library = compiler_args.findall( + '{http://schemas.microsoft.com/developer/msbuild/2003}RuntimeLibrary') + if len(runtime_library) == 0: + runtime_library = ElementTree.Element("RuntimeLibrary") + compiler_args.append(runtime_library) + elif len(runtime_library) == 1: + runtime_library = runtime_library[0] + else: + assert False, runtime_library + + runtime_library.text = "MultiThreaded" + if is_debug: + runtime_library.text += "Debug" + + tree.write(out_project) + + +if __name__ == "__main__": + if len(sys.argv) != 3: + sys.exit(f"{sys.argv[0]} [OLD_VS_PROJECT_FILE] [OUTPUT_FILE]") + _, in_path, out_path = sys.argv + if not os.path.isfile(in_path): + sys.exit(f"{in_path} does not exist!") + + adapt(in_path, out_path) diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml new file mode 100644 index 0000000000..8848ab4418 --- /dev/null +++ b/.github/workflows/style.yml @@ -0,0 +1,44 @@ +--- +name: Code style tests + +on: [push, pull_request] + +jobs: + style: + name: Test code style + runs-on: ubuntu-18.04 + steps: + - name: Clone repository + uses: actions/checkout@master + + - name: Install Python + uses: actions/setup-python@master + with: + python-version: 3.6 + + - name: Install dependencies + run: | + pip3 install tox + sudo apt-get -y install clang-tidy-8 + + - name: Install uncrustify + run: | + # Set up uncrustify. + wget https://github.com/uncrustify/uncrustify/archive/uncrustify-0.67.tar.gz + tar xzf uncrustify-0.67.tar.gz + cd uncrustify-uncrustify-0.67 + mkdir build + cd build + cmake ../ + make -j2 + mv uncrustify ../../ + cd ../../ + rm -rf uncrustify-0.67.tar.gz uncrustify-uncrustify-0.67 + + - name: Run code style tests + run: | + export PATH="$(pwd):$PATH" # Add uncrustify to path. + cd misc/ + tox -e style,clang-tidy + +... diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 3c29a0539c..a97ffde336 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -1,80 +1,68 @@ --- -name: ubuntu +name: Ubuntu on: [push, pull_request] -## Unfortunately, we didn't manage to use something like "variables" -## globally. Github actions can set environment variables globally, -## but they can only be single values, not lists, and ideally we would -## like to have something like COMPILER_VERSIONS = [gcc, ..]. -## Now, whenever we change versions, we have to remember to do this -## *everywhere* in this file. - jobs: - compile: # identifier of the job. Jobs run in parallel unless specified otherwise. 
- name: compile + compile: + name: Compile planner + timeout-minutes: 60 runs-on: ${{ matrix.ubuntu-version }} strategy: matrix: ubuntu-version: [ubuntu-18.04, ubuntu-20.04] - compiler-version: [gcc, gcc-10, clang, clang-11] + compiler-version: + - {cc: gcc, cxx: g++} + - {cc: gcc-10, cxx: g++-10} + - {cc: clang, cxx: clang++} + - {cc: clang-11, cxx: clang++-11} python-version: [3.6] + # Unfortunately, we couldn't figure out a way to name the + # compiler versions so that we don't have to copy them here. exclude: - ubuntu-version: ubuntu-18.04 - compiler-version: gcc-10 + compiler-version: {cc: gcc-10, cxx: g++-10} - ubuntu-version: ubuntu-18.04 - compiler-version: clang-11 - steps: # each - is a new sequentially run step - - name: clone-repo - uses: actions/checkout@v1 + compiler-version: {cc: clang-11, cxx: clang++-11} + env: + CC: ${{ matrix.compiler-version.cc }} + CXX: ${{ matrix.compiler-version.cxx }} + CPLEX_URL: ${{ secrets.CPLEX129_LINUX_URL }} + SOPLEX_URL: ${{ secrets.SOPLEX311_URL }} + DOWNWARD_CPLEX_ROOT: /home/runner/work/downward/lib/ibm/ILOG/CPLEX_Studio129/cplex + DOWNWARD_SOPLEX_ROOT: /home/runner/work/downward/lib/soplex-3.1.1 + DOWNWARD_COIN_ROOT: /home/runner/work/downward/lib/coin + steps: + - name: Clone repository + uses: actions/checkout@master - - name: setup-python - uses: actions/setup-python@v1 + - name: Install Python + uses: actions/setup-python@master with: python-version: ${{ matrix.python-version }} - - name: setup-dependencies + - name: Install dependencies run: | - sudo apt-get update - sudo apt-get -y install zlib1g-dev libgmp3-dev - sudo apt-get -y install ${{ matrix.compiler-version }} - export CC=${{ matrix.compiler-version }} - if [[ $CC == gcc* ]]; then - export CXX="$(echo "${CC}" | sed "s/cc/++/g")"; - elif [[ $CC == clang* ]]; then - export CXX="$(echo "${CC}" | sed "s/clang/clang++/g")"; - else - echo "Unknown compiler version"; - exit 1; - fi + sudo apt-get -y install zlib1g-dev libgmp3-dev ${{ matrix.compiler-version.cc }} mkdir /home/runner/work/downward/lib - # We only want to setup osi if both LP solvers are set, hence + # We only want to set up Osi if both LP solvers are set, hence # we execute the following three steps only if both secrets # are set. - - name: setup-cplex - env: - CPLEX_URL: ${{ secrets.CPLEX_URL }} - SOPLEX_URL: ${{ secrets.SOPLEX_URL }} - DOWNWARD_CPLEX_ROOT: /home/runner/work/downward/lib/ibm/ILOG/CPLEX_Studio129 + - name: Install CPLEX if: ${{ env.CPLEX_URL != 0 && env.SOPLEX_URL != 0 }} run: | # We redirect output of wget to hide the secret URLs. - wget $CPLEX_URL &> /dev/null - export CPLEX_INSTALLER=cplex_studio129.linux-x86-64.bin - chmod +x $CPLEX_INSTALLER - ./$CPLEX_INSTALLER -DLICENSE_ACCEPTED=TRUE -DUSER_INSTALL_DIR=${DOWNWARD_CPLEX_ROOT} -i silent - rm $CPLEX_INSTALLER - - - name: setup-soplex - env: - CPLEX_URL: ${{ secrets.CPLEX_URL }} - SOPLEX_URL: ${{ secrets.SOPLEX_URL }} - DOWNWARD_SOPLEX_ROOT: /home/runner/work/downward/lib/soplex-3.1.1 + wget -O cplex_installer $CPLEX_URL &> /dev/null + chmod +x cplex_installer + ./cplex_installer -DLICENSE_ACCEPTED=TRUE -DUSER_INSTALL_DIR="$(dirname "${DOWNWARD_CPLEX_ROOT}")" -i silent + rm cplex_installer + + - name: Install SoPlex if: ${{ env.CPLEX_URL != 0 && env.SOPLEX_URL != 0 }} run: | # We redirect output of wget to hide the secret URLs. 
- wget $SOPLEX_URL &> /dev/null + wget -O soplex-3.1.1.tgz $SOPLEX_URL &> /dev/null tar xvzf soplex-3.1.1.tgz cd soplex-3.1.1 mkdir build @@ -85,13 +73,7 @@ jobs: cd ../../ rm -r soplex-3.1.1 - - name: setup-osi - env: - CPLEX_URL: ${{ secrets.CPLEX_URL }} - SOPLEX_URL: ${{ secrets.SOPLEX_URL }} - DOWNWARD_CPLEX_ROOT: /home/runner/work/downward/lib/ibm/ILOG/CPLEX_Studio129/cplex - DOWNWARD_SOPLEX_ROOT: /home/runner/work/downward/lib/soplex-3.1.1 - DOWNWARD_COIN_ROOT: /home/runner/work/downward/lib/coin + - name: Install Osi if: ${{ env.CPLEX_URL != 0 && env.SOPLEX_URL != 0 }} run: | wget http://www.coin-or.org/download/source/Osi/Osi-0.107.9.tgz @@ -109,125 +91,118 @@ jobs: --with-soplex-lib="-lsoplex" \ --with-cplex-incdir=$DOWNWARD_CPLEX_ROOT/include/ilcplex \ --with-cplex-lib="-lcplex -lm -ldl" # -ldl is only needed for CPLEX >= 12.8 - make + make -j2 make install cd ../ rm -r Osi-0.107.9.tgz Osi-0.107.9 - - name: compile - env: - DOWNWARD_CPLEX_ROOT: /home/runner/work/downward/lib/ibm/ILOG/CPLEX_Studio129/cplex - DOWNWARD_SOPLEX_ROOT: /home/runner/work/downward/lib/soplex-3.1.1 - DOWNWARD_COIN_ROOT: /home/runner/work/downward/lib/coin + - name: Compile planner run: | - export CXXFLAGS="-Werror" # Treat compilation warnings as errors. + export CXXFLAGS="-Werror" # Treat compilation warnings as errors. ./build.py --debug ./build.py - - name: setup-uncrustify - run: | - # Set up uncrustify. - wget https://github.com/uncrustify/uncrustify/archive/uncrustify-0.67.tar.gz - tar xzf uncrustify-0.67.tar.gz - cd uncrustify-uncrustify-0.67 - mkdir build - cd build - cmake ../ - make -j8 - mv uncrustify ../../ - cd ../../ - rm -rf uncrustify-0.67.tar.gz uncrustify-uncrustify-0.67 - - - name: setup-val - run: | - # Set up VAL. - sudo apt-get -y install flex bison - git clone https://github.com/KCL-Planning/VAL.git - cd VAL - git checkout a5565396007eee73ac36527fbf904142b3077c74 - make clean # Remove old build artifacts and binaries. - sed -i 's/-Werror //g' Makefile # Ignore warnings. - make -j8 - mv validate ../ - cd ../ - ls -la - rm -rf VAL - - - name: archive-files + - name: Archive required files # We only run tests on the version compiled with gcc, so we # only need to archive that one. - if: ${{ matrix.compiler-version }} == gcc + if: ${{ matrix.compiler-version.cc == 'gcc' }} + # We determined the dynamically-linked libraries using ldd. We + # archive the entire lib directory of Osi because we need all + # 4 large library files and several file links to these. run: | cd ../ - tar cfz ${{ matrix.compiler-version }}.tar.gz downward lib + libs="" + if [[ ! -z "${CPLEX_URL}" || ! -z "${SOPLEX_URL}" ]]; then + libs="${libs} lib/coin/lib/" + fi + if [[ ! 
-z "${CPLEX_URL}" ]]; then + libs="${libs} lib/ibm/ILOG/CPLEX_Studio129/cplex/bin/x86-64_linux/libcplex1290.so" + fi + + tar cfz archive.tar.gz downward/fast-downward.py downward/driver downward/misc downward/builds/debug/bin/ downward/builds/release/bin/ ${libs} - - name: upload-files - if: ${{ matrix.compiler-version }} == gcc - uses: actions/upload-artifact@v1 + - name: Upload archive + if: ${{ matrix.compiler-version.cc == 'gcc' }} + uses: actions/upload-artifact@master with: name: compiled-planner-${{ matrix.ubuntu-version }} - path: /home/runner/work/downward/${{ matrix.compiler-version }}.tar.gz + path: /home/runner/work/downward/archive.tar.gz + retention-days: 1 test: - name: test - runs-on: ${{ matrix.ubuntu-version }} + name: Test planner + runs-on: ${{ matrix.version.ubuntu }} needs: compile # TODO: this only depends on the compile step with gcc strategy: matrix: - ubuntu-version: [ubuntu-18.04, ubuntu-20.04] - compiler-version: [gcc] - python-version: [3.6, 3.8] - exclude: - - ubuntu-version: ubuntu-18.04 - python-version: 3.8 + version: + - {ubuntu: ubuntu-18.04, python: 3.6} + - {ubuntu: ubuntu-20.04, python: 3.8} + env: + CPLEX_URL: ${{ secrets.CPLEX129_LINUX_URL }} + SOPLEX_URL: ${{ secrets.SOPLEX311_URL }} steps: - - name: setup-python - uses: actions/setup-python@v1 + - name: Download archive + uses: actions/download-artifact@master with: - python-version: ${{ matrix.python-version }} + name: compiled-planner-${{ matrix.version.ubuntu }} - - name: download-files - uses: actions/download-artifact@v1 + - name: Delete artifact + uses: geekyeggo/delete-artifact@master with: - name: compiled-planner-${{ matrix.ubuntu-version }} + name: compiled-planner-${{ matrix.version.ubuntu }} + + - name: Install Python + uses: actions/setup-python@master + with: + python-version: ${{ matrix.version.python }} - - name: extract-files - # We need to make sure that paths are the same as in the first job, - # otherwise cmake exits with an error when called during tests. - # Alternatively, we could change the tests so that they don't build. + - name: Install dependencies run: | - cd /home/runner/work/downward/downward/compiled-planner-${{ matrix.ubuntu-version }} - tar xfz ${{ matrix.compiler-version }}.tar.gz - shopt -s dotglob - mv downward/* ../ - mv lib/ ../../ + pip3 install tox + sudo apt-get -y install zlib1g-dev libgmp3-dev gcc flex bison + + # NOTE: VAL does not compile with clang-11. + - name: Install VAL + run: | + git clone https://github.com/KCL-Planning/VAL.git + cd VAL + git checkout a5565396007eee73ac36527fbf904142b3077c74 + make clean # Remove old build artifacts and binaries. + sed -i 's/-Werror //g' Makefile # Ignore warnings. + make -j2 + mv validate ../ cd ../ - rm -r compiled-planner-${{ matrix.ubuntu-version }} + rm -rf VAL + echo `pwd` >> $GITHUB_PATH # Add VAL to path of subsequent steps. - - name: setup-dependencies + - name: Extract archive + # We need to make sure that library paths are the same as + # during compilation. run: | - pip3 install pytest tox - sudo apt-get update - sudo apt-get -y install clang-tidy-8 valgrind zlib1g-dev libgmp3-dev - sudo apt-get -y install ${{ matrix.compiler-version }} - - - name: test - env: - DOWNWARD_CPLEX_ROOT: /home/runner/work/downward/lib/ibm/ILOG/CPLEX_Studio129/cplex - DOWNWARD_SOPLEX_ROOT: /home/runner/work/downward/lib/soplex-3.1.1 - DOWNWARD_COIN_ROOT: /home/runner/work/downward/lib/coin + tar xfz archive.tar.gz + shopt -s dotglob + mv downward/* . + if [[ ! -z "${CPLEX_URL}" || ! 
-z "${SOPLEX_URL}" ]]; then + mv lib/ ../ + fi + + - name: Run driver, translator and search tests run: | - export PATH="$(pwd):$PATH" # Add uncrustify and VAL to PATH. cd misc/ - python_version=${{ matrix.python-version }} - if [[ $python_version=="3.6" ]]; then - tox -e py36,translator,search,style - elif [[ $python_version=="3.8" ]]; then - tox -e py38,translator,search,style - else - echo "Unknown Python version"; - exit 1; - fi + tox -e driver,translator,search + + - name: Run CPLEX tests + if: ${{ env.CPLEX_URL != 0 && env.SOPLEX_URL != 0 }} + run: | + cd misc/ + tox -e cplex + + - name: Run SoPlex tests + if: ${{ env.CPLEX_URL != 0 && env.SOPLEX_URL != 0 }} + run: | + cd misc/ + tox -e soplex ... diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 47971e07f7..a633316303 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -1,71 +1,177 @@ --- -name: windows-latest +name: Windows on: [push, pull_request] + env: ARCH: "x64" - VC: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Enterprise\\VC\\Auxiliary\\Build\\vcvarsall.bat" + CC: cl + CXX: cl + + DOWNWARD_COIN_ROOT_RELEASE: D:\a\downward\osi_release + DOWNWARD_COIN_ROOT_DEBUG: D:\a\downward\osi_debug + DOWNWARD_CPLEX_ROOT: D:\a\downward\cplex + ZLIB_ROOT: D:\a\downward\zlib + + CPLEX_URL: "${{ secrets.CPLEX129_WINDOWS_URL }}" + OSI_URL: "https://www.coin-or.org/download/source/Osi/Osi-0.107.9.tgz" + ZLIB_URL: "https://www.zlib.net/zlib1211.zip" + + jobs: - compile: # name of the job. Jobs run in parallel unless specified otherwise. - name: compile - runs-on: windows-latest + test: + name: Compile and test planner + timeout-minutes: 60 + runs-on: ${{ matrix.platform.os }} strategy: matrix: - python-version: ['3.6'] - build-version: ["release", "debug"] - steps: # each - is a new sequentially run step - - uses: actions/checkout@master + platform: + - {os: "windows-2016", vc: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Enterprise\\VC\\Auxiliary\\Build\\vcvarsall.bat"} + - {os: "windows-2019", vc: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Enterprise\\VC\\Auxiliary\\Build\\vcvarsall.bat"} + python-version: [3.6] + steps: + - name: Clone repository + uses: actions/checkout@master - - name: Setup python - uses: actions/setup-python@v1 + - name: Install Python + uses: actions/setup-python@master with: python-version: ${{ matrix.python-version }} - architecture: ${{ env.ARCH }} - - name: compile + + - name: Install zlib + if: ${{ env.CPLEX_URL != 0 }} shell: cmd run: | - "${{ env.VC }}" ${{ env.ARCH }} & python build.py ${{ matrix.build-version }} + call "${{ matrix.platform.vc }}" %ARCH% - - name: upload-planner - uses: actions/upload-artifact@master - with: - name: ${{ matrix.build-version }} - path: builds/${{ matrix.build-version }} + cd .. + curl.exe --output zlib.zip %ZLIB_URL% + unzip zlib.zip + del zlib.zip + mkdir zlib + cd zlib + echo "Set up zlib include directory" + move ../zlib-1.2.11 include - tests: # name of the job. Jobs run in parallel unless specified otherwise. 
- name: test - runs-on: windows-latest - needs: compile - strategy: - matrix: - python-version: ['3.6'] - steps: # each - is a new sequentially run step - - uses: actions/checkout@master + echo "Compile zlib library" + cd include + nmake /f win32/Makefile.msc + mkdir ..\lib + move zdll.lib ..\lib\zdll.lib + move zlib.lib ..\lib\zlib.lib + move zlib1.dll ..\lib\zlib1.dll - - name: Setup python - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - architecture: ${{ env.ARCH }} - - name: setup + - name: Install CPLEX + if: ${{ env.CPLEX_URL != 0 }} run: | - pip3 install pytest tox - mkdir builds + echo "For information about the CPLEX silent installation consult:" + echo "https://www.ibm.com/support/knowledgecenter/SSSA5P_12.9.0/ilog.odms.studio.help/Optimization_Studio/topics/td_silent_install.html" + curl.exe --output cplex.exe $ENV:CPLEX_URL - - name: download-planner - uses: actions/download-artifact@master - with: - # without 'name' attribute all artifacts are downloaded and the - # artifact name is used as directory name. - path: builds/ + echo "Install CPLEX" + Start-Process -FilePath .\cplex.exe -ArgumentList "-f", "D:\a\downward\downward\.github\workflows\misc\cplex129_windows_installer.properties" -PassThru | Wait-Process + del .\cplex.exe + + echo "Copy the relevant directory to a location which is not magically protected against cmake" + Xcopy /E /I ..\cplex_temp\cplex ..\cplex + + + - name: Install Coin + shell: cmd + if: ${{ env.CPLEX_URL != 0 }} + run: | + call "${{ matrix.platform.vc }}" %ARCH% + set SET_RUNTIME_LIBRARY=python D:\a\downward\downward\.github\workflows\misc\set-visual-studio-static-runtime-libraries.py + + cd .. + echo "Download OSI" + curl.exe --output osi.tgz %OSI_URL% + tar xf osi.tgz + del osi.tgz + cd Osi-0.107.9 + + echo "Set up Include Directory" + mkdir ..\osi_release\include + copy CoinUtils\src\*.hpp ..\osi_release\include + copy CoinUtils\src\*.h ..\osi_release\include + copy Osi\src\Osi\*.hpp ..\osi_release\include + copy Osi\src\Osi\*.h ..\osi_release\include + copy Osi\src\OsiCpx\*.hpp ..\osi_release\include + copy Osi\src\OsiCpx\*.h ..\osi_release\include + Xcopy /E /I ..\osi_release\include ..\osi_debug\include + + echo "Set up Lib Directory" + mkdir ..\osi_release\lib + mkdir ..\osi_debug\lib + + echo "Compile libOsi" + cd Osi\MSVisualStudio\v10\ + devenv Osi.sln /Upgrade + cd libOsi\ + %SET_RUNTIME_LIBRARY% libOsi.vcxproj libOsi.vcxproj + msbuild libOsi.vcxproj /p:Configuration=Release /p:Platform=x64 /p:DefaultWindowsSDKVersion=%WindowsSDKVersion% /p:OutDir=lib + move lib\* ..\..\..\..\..\osi_release\lib\ + msbuild libOsi.vcxproj /p:Configuration=Debug /p:Platform=x64 /p:DefaultWindowsSDKVersion=%WindowsSDKVersion% /p:OutDir=lib + move lib\* ..\..\..\..\..\osi_debug\lib\ + + echo "Compile libOsiCpx" + cd ..\..\..\src\OsiCpx + cl /EHsc OsiCpxSolverInterface.cpp /I ..\Osi /I ..\..\..\CoinUtils\src /I "%DOWNWARD_CPLEX_ROOT%\include\ilcplex" /c + lib OsiCpxSolverInterface.obj + move OsiCpxSolverInterface.lib ..\..\..\..\osi_release\lib\libOsiCpx.lib + cl /EHsc OsiCpxSolverInterface.cpp /I ..\Osi /I ..\..\..\CoinUtils\src /I "%DOWNWARD_CPLEX_ROOT%\include\ilcplex" /c /MTd + lib OsiCpxSolverInterface.obj + move OsiCpxSolverInterface.lib ..\..\..\..\osi_debug\lib\libOsiCpx.lib + + echo "Compile libCoinUtils" + cd ..\..\..\CoinUtils\MSVisualStudio\v10 + devenv CoinUtils.sln /Upgrade + cd libCoinUtils + %SET_RUNTIME_LIBRARY% libCoinUtils.vcxproj libCoinUtils.vcxproj + msbuild libCoinUtils.vcxproj 
/p:Configuration=Release /p:Platform=x64 /p:DefaultWindowsSDKVersion=%WindowsSDKVersion% /p:OutDir=lib
+          move lib\* ..\..\..\..\..\osi_release\lib\
+          msbuild libCoinUtils.vcxproj /p:Configuration=Debug /p:Platform=x64 /p:DefaultWindowsSDKVersion=%WindowsSDKVersion% /p:OutDir=lib
+          move lib\* ..\..\..\..\..\osi_debug\lib\
+
+
+      - name: Compile planner
+        shell: cmd
+        run: |
+          call "${{ matrix.platform.vc }}" %ARCH%
+          python build.py release
+          python build.py debug
+
+      - name: Install tox
+        run: |
+          pip3 install tox
+
+      - name: Run translator and search tests
+        shell: cmd
+        # We do not run driver tests here because that would require
+        # VAL to be installed, which currently cannot be easily done
+        # on Windows for the version of VAL we use. When the maintainers
+        # of VAL fix the latest version to accept plans without time
+        # steps, we hope to be able to install VAL natively on Windows.
+        run: |
+          call "${{ matrix.platform.vc }}" %ARCH%
+          rem "dumpbin /dependents builds\release\bin\downward.exe shows that"
+          rem "downward.exe depends on cplexXYZ.dll. Thus, we have to add it to"
+          rem "the PATH. On my local CPLEX installation this is done"
+          rem "automatically. For the GitHub Action we have to do it manually:"
+          set PATH=%PATH%;D:\a\downward\cplex_temp\opl\bin\x64_win64/
+          cd misc/
+          tox -e translator,search

-      - name: test
+      - name: Run CPLEX tests
         shell: cmd
+        if: ${{ env.CPLEX_URL != 0 }}
         run: |
+          call "${{ matrix.platform.vc }}" %ARCH%
+          set PATH=%PATH%;D:\a\downward\cplex_temp\opl\bin\x64_win64/
           cd misc/
-          "${{ env.VC }}" ${{ env.ARCH }} & tox -e search,translator
+          tox -e cplex
 ...
diff --git a/CHANGES.md b/CHANGES.md
index 9a68b03623..e6c73873eb 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -28,6 +28,7 @@ after the corresponding tracker issues.
 - For developers: move functionality used during search away from
   LandmarkGraph, making it constant after creation.
 
+- For developers: new state class
 
@@ -57,6 +58,13 @@ after the corresponding tracker issues.
   USE_GLIBCXX_DEBUG. The build configurations debugnolp and releasenolp
   have been removed, and the build configuration glibcxx_debug has been
   added.
 
+- For developers: decide on rules regarding software support and
+  improve GitHub Actions accordingly
+
+
+- For developers: add CPLEX support to our GitHub Actions for Windows
+
+
 - Fix a bug in the computation of RHW landmarks
diff --git a/README.md b/README.md
index a87ce409f9..51aabc2c8e 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,22 @@ For further information:
 - Fast Downward main repository:
 
+## Tested software versions
+
+This version of Fast Downward has been tested with the following software versions:
+
+| OS           | Python | C++ compiler                                                      | CMake |
+| ------------ | ------ | ----------------------------------------------------------------- | ----- |
+| Ubuntu 20.04 | 3.8    | GCC 9, GCC 10, Clang 10, Clang 11                                 | 3.16  |
+| Ubuntu 18.04 | 3.6    | GCC 7, Clang 6                                                    | 3.10  |
+| macOS 10.15  | 3.6    | AppleClang 12                                                     | 3.19  |
+| Windows 10   | 3.6    | Visual Studio Enterprise 2017 (MSVC 19.16) and 2019 (MSVC 19.28)  | 3.19  |
+
+We test LP support with CPLEX 12.9, SoPlex 3.1.1 and Osi 0.107.9.
+On Ubuntu, we test both CPLEX and SoPlex. On Windows, we currently
+only test CPLEX, and on macOS, we do not test LP solvers (yet).
+ + ## Contributors The following list includes all people that actively contributed to diff --git a/driver/arguments.py b/driver/arguments.py index a85d129f96..bfd38b4218 100644 --- a/driver/arguments.py +++ b/driver/arguments.py @@ -52,25 +52,25 @@ EXAMPLES = [ ("Translate and find a plan with A* + LM-Cut:", - ["./fast-downward.py", "misc/tests/benchmarks/gripper/prob01.pddl", + ["misc/tests/benchmarks/gripper/prob01.pddl", "--search", '"astar(lmcut())"']), ("Translate and run no search:", - ["./fast-downward.py", "--translate", + ["--translate", "misc/tests/benchmarks/gripper/prob01.pddl"]), ("Run predefined configuration (LAMA-2011) on translated task:", - ["./fast-downward.py", "--alias", "seq-sat-lama-2011", "output.sas"]), + ["--alias", "seq-sat-lama-2011", "output.sas"]), ("Run a portfolio on a translated task:", - ["./fast-downward.py", "--portfolio", EXAMPLE_PORTFOLIO, + ["--portfolio", EXAMPLE_PORTFOLIO, "--search-time-limit", "30m", "output.sas"]), ("Run the search component in debug mode (with assertions enabled) " "and validate the resulting plan:", - ["./fast-downward.py", "--debug", "output.sas", "--search", '"astar(ipdb())"']), + ["--debug", "output.sas", "--search", '"astar(ipdb())"']), ("Pass options to translator and search components:", - ["./fast-downward.py", "misc/tests/benchmarks/gripper/prob01.pddl", + ["misc/tests/benchmarks/gripper/prob01.pddl", "--translate-options", "--full-encoding", "--search-options", "--search", '"astar(lmcut())"']), ("Find a plan and validate it:", - ["./fast-downward.py", "--validate", + ["--validate", "misc/tests/benchmarks/gripper/prob01.pddl", "--search", '"astar(cegar())"']), ] @@ -84,7 +84,7 @@ Examples: %s -""" % "\n\n".join("%s\n%s" % (desc, " ".join(cmd)) for desc, cmd in EXAMPLES) +""" % "\n\n".join("%s\n%s" % (desc, " ".join([os.path.basename(sys.argv[0])] + parameters)) for desc, parameters in EXAMPLES) COMPONENTS_PLUS_OVERALL = ["translate", "search", "validate", "overall"] DEFAULT_SAS_FILE = "output.sas" diff --git a/driver/portfolio_runner.py b/driver/portfolio_runner.py index a1665278e9..3fd7900609 100644 --- a/driver/portfolio_runner.py +++ b/driver/portfolio_runner.py @@ -15,8 +15,8 @@ __all__ = ["run"] -import os import subprocess +import sys from . import call from . import limits @@ -219,8 +219,8 @@ def run(portfolio, executable, sas_file, plan_manager, time, memory): "Please pass a time limit to fast-downward.py.") if time is None: - if os.name == "nt": - returncodes.exit_with_driver_unsupported_error(limits.RESOURCE_MODULE_MISSING_MSG) + if sys.platform == "win32": + returncodes.exit_with_driver_unsupported_error(limits.CANNOT_LIMIT_TIME_MSG) else: returncodes.exit_with_driver_input_error( "Portfolios need a time limit. 
Please pass --search-time-limit " diff --git a/driver/tests.py b/driver/tests.py index c6bd6080ca..eaa74dcfa5 100644 --- a/driver/tests.py +++ b/driver/tests.py @@ -6,6 +6,7 @@ import os import subprocess +import sys import pytest @@ -18,59 +19,62 @@ def translate(): """Create translated task.""" - cmd = ["./fast-downward.py", "--translate", + cmd = [sys.executable, "fast-downward.py", "--translate", "misc/tests/benchmarks/gripper/prob01.pddl"] subprocess.check_call(cmd, cwd=REPO_ROOT_DIR) def cleanup(): - subprocess.check_call(["./fast-downward.py", "--cleanup"], + subprocess.check_call([sys.executable, "fast-downward.py", "--cleanup"], cwd=REPO_ROOT_DIR) -def run_driver(cmd): +def run_driver(parameters): cleanup() translate() + cmd = [sys.executable, "fast-downward.py"] + parameters return subprocess.check_call(cmd, cwd=REPO_ROOT_DIR) def test_commandline_args(): for description, cmd in EXAMPLES: - cmd = [x.strip('"') for x in cmd] - run_driver(cmd) + parameters = [x.strip('"') for x in cmd] + run_driver(parameters) def test_aliases(): for alias, config in ALIASES.items(): - cmd = ["./fast-downward.py", "--alias", alias, "output.sas"] - run_driver(cmd) + parameters = ["--alias", alias, "output.sas"] + run_driver(parameters) def test_show_aliases(): - run_driver(["./fast-downward.py", "--show-aliases"]) + run_driver(["--show-aliases"]) def test_portfolios(): for name, portfolio in PORTFOLIOS.items(): - cmd = ["./fast-downward.py", "--portfolio", portfolio, - "--search-time-limit", "30m", "output.sas"] - run_driver(cmd) + parameters = ["--portfolio", portfolio, + "--search-time-limit", "30m", "output.sas"] + run_driver(parameters) +@pytest.mark.skipif(not limits.can_set_time_limit(), reason="Cannot set time limits on this system") def test_hard_time_limit(): def preexec_fn(): limits.set_time_limit(10) - cmd = [ - "./fast-downward.py", "--translate", "--translate-time-limit", + driver = [sys.executable, "fast-downward.py"] + parameters = [ + "--translate", "--translate-time-limit", "10s", "misc/tests/benchmarks/gripper/prob01.pddl"] - subprocess.check_call(cmd, preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR) + subprocess.check_call(driver + parameters, preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR) - cmd = [ - "./fast-downward.py", "--translate", "--translate-time-limit", + parameters = [ + "--translate", "--translate-time-limit", "20s", "misc/tests/benchmarks/gripper/prob01.pddl"] with pytest.raises(subprocess.CalledProcessError) as exception_info: - subprocess.check_call(cmd, preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR) + subprocess.check_call(driver + parameters, preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR) assert exception_info.value.returncode == returncodes.DRIVER_INPUT_ERROR diff --git a/experiments/issue1000/common_setup.py b/experiments/issue1000/common_setup.py new file mode 100644 index 0000000000..f2bbda8569 --- /dev/null +++ b/experiments/issue1000/common_setup.py @@ -0,0 +1,398 @@ +# -*- coding: utf-8 -*- + +import itertools +import os +import platform +import subprocess +import sys + +from lab.experiment import ARGPARSER +from lab import tools + +from downward.experiment import FastDownwardExperiment +from downward.reports.absolute import AbsoluteReport +from downward.reports.compare import ComparativeReport +from downward.reports.scatter import ScatterPlotReport + + +def parse_args(): + ARGPARSER.add_argument( + "--test", + choices=["yes", "no", "auto"], + default="auto", + dest="test_run", + help="test experiment locally on a small suite if --test=yes or " + "--test=auto and we are not on a 
cluster") + return ARGPARSER.parse_args() + +ARGS = parse_args() + + +DEFAULT_OPTIMAL_SUITE = [ + 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', + 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', + 'data-network-opt18-strips', 'depot', 'driverlog', + 'elevators-opt08-strips', 'elevators-opt11-strips', + 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', + 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', + 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', + 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', + 'openstacks-opt11-strips', 'openstacks-opt14-strips', + 'openstacks-strips', 'organic-synthesis-opt18-strips', + 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', + 'parcprinter-opt11-strips', 'parking-opt11-strips', + 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', + 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', + 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', + 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', + 'snake-opt18-strips', 'sokoban-opt08-strips', + 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', + 'termes-opt18-strips', 'tetris-opt14-strips', + 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', + 'transport-opt08-strips', 'transport-opt11-strips', + 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', + 'visitall-opt14-strips', 'woodworking-opt08-strips', + 'woodworking-opt11-strips', 'zenotravel'] + +DEFAULT_SATISFICING_SUITE = [ + 'agricola-sat18-strips', 'airport', 'assembly', + 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', + 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', + 'childsnack-sat14-strips', 'citycar-sat14-adl', + 'data-network-sat18-strips', 'depot', 'driverlog', + 'elevators-sat08-strips', 'elevators-sat11-strips', + 'flashfill-sat18-adl', 'floortile-sat11-strips', + 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', + 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', + 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', + 'miconic-simpleadl', 'movie', 'mprime', 'mystery', + 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', + 'openstacks-sat08-adl', 'openstacks-sat08-strips', + 'openstacks-sat11-strips', 'openstacks-sat14-strips', + 'openstacks-strips', 'optical-telegraphs', + 'organic-synthesis-sat18-strips', + 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', + 'parcprinter-sat11-strips', 'parking-sat11-strips', + 'parking-sat14-strips', 'pathways', 'pathways-noneg', + 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', + 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', + 'psr-middle', 'psr-small', 'rovers', 'satellite', + 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', + 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', + 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', + 'termes-sat18-strips', 'tetris-sat14-strips', + 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', + 'transport-sat08-strips', 'transport-sat11-strips', + 'transport-sat14-strips', 'trucks', 'trucks-strips', + 'visitall-sat11-strips', 'visitall-sat14-strips', + 'woodworking-sat08-strips', 'woodworking-sat11-strips', + 'zenotravel'] + + +def get_script(): + """Get file name of main script.""" + return tools.get_script_path() + + +def get_script_dir(): + """Get directory of main script. 
+ + Usually a relative directory (depends on how it was called by the user.)""" + return os.path.dirname(get_script()) + + +def get_experiment_name(): + """Get name for experiment. + + Derived from the absolute filename of the main script, e.g. + "/ham/spam/eggs.py" => "spam-eggs".""" + script = os.path.abspath(get_script()) + script_dir = os.path.basename(os.path.dirname(script)) + script_base = os.path.splitext(os.path.basename(script))[0] + return "%s-%s" % (script_dir, script_base) + + +def get_data_dir(): + """Get data dir for the experiment. + + This is the subdirectory "data" of the directory containing + the main script.""" + return os.path.join(get_script_dir(), "data", get_experiment_name()) + + +def get_repo_base(): + """Get base directory of the repository, as an absolute path. + + Search upwards in the directory tree from the main script until a + directory with a subdirectory named ".git" is found. + + Abort if the repo base cannot be found.""" + path = os.path.abspath(get_script_dir()) + while os.path.dirname(path) != path: + if os.path.exists(os.path.join(path, ".git")): + return path + path = os.path.dirname(path) + sys.exit("repo base could not be found") + + +def is_running_on_cluster(): + node = platform.node() + return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") + + +def is_test_run(): + return ARGS.test_run == "yes" or ( + ARGS.test_run == "auto" and not is_running_on_cluster()) + + +def get_algo_nick(revision, config_nick): + return "{revision}-{config_nick}".format(**locals()) + + +class IssueConfig(object): + """Hold information about a planner configuration. + + See FastDownwardExperiment.add_algorithm() for documentation of the + constructor's options. + + """ + def __init__(self, nick, component_options, + build_options=None, driver_options=None): + self.nick = nick + self.component_options = component_options + self.build_options = build_options + self.driver_options = driver_options + + +class IssueExperiment(FastDownwardExperiment): + """Subclass of FastDownwardExperiment with some convenience features.""" + + DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] + + DEFAULT_TABLE_ATTRIBUTES = [ + "cost", + "coverage", + "error", + "evaluations", + "expansions", + "expansions_until_last_jump", + "initial_h_value", + "generated", + "memory", + "planner_memory", + "planner_time", + "quality", + "run_dir", + "score_evaluations", + "score_expansions", + "score_generated", + "score_memory", + "score_search_time", + "score_total_time", + "search_time", + "total_time", + ] + + DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ + "evaluations", + "expansions", + "expansions_until_last_jump", + "initial_h_value", + "memory", + "search_time", + "total_time", + ] + + PORTFOLIO_ATTRIBUTES = [ + "cost", + "coverage", + "error", + "plan_length", + "run_dir", + ] + + def __init__(self, revisions=None, configs=None, path=None, **kwargs): + """ + + You can either specify both *revisions* and *configs* or none + of them. If they are omitted, you will need to call + exp.add_algorithm() manually. + + If *revisions* is given, it must be a non-empty list of + revision identifiers, which specify which planner versions to + use in the experiment. The same versions are used for + translator, preprocessor and search. :: + + IssueExperiment(revisions=["issue123", "4b3d581643"], ...) + + If *configs* is given, it must be a non-empty list of + IssueConfig objects. 
:: + + IssueExperiment(..., configs=[ + IssueConfig("ff", ["--search", "eager_greedy(ff())"]), + IssueConfig( + "lama", [], + driver_options=["--alias", "seq-sat-lama-2011"]), + ]) + + If *path* is specified, it must be the path to where the + experiment should be built (e.g. + /home/john/experiments/issue123/exp01/). If omitted, the + experiment path is derived automatically from the main + script's filename. Example:: + + script = experiments/issue123/exp01.py --> + path = experiments/issue123/data/issue123-exp01/ + + """ + + path = path or get_data_dir() + + FastDownwardExperiment.__init__(self, path=path, **kwargs) + + if (revisions and not configs) or (not revisions and configs): + raise ValueError( + "please provide either both or none of revisions and configs") + + for rev in revisions: + for config in configs: + self.add_algorithm( + get_algo_nick(rev, config.nick), + get_repo_base(), + rev, + config.component_options, + build_options=config.build_options, + driver_options=config.driver_options) + + self._revisions = revisions + self._configs = configs + + @classmethod + def _is_portfolio(cls, config_nick): + return "fdss" in config_nick + + @classmethod + def get_supported_attributes(cls, config_nick, attributes): + if cls._is_portfolio(config_nick): + return [attr for attr in attributes + if attr in cls.PORTFOLIO_ATTRIBUTES] + return attributes + + def add_absolute_report_step(self, **kwargs): + """Add step that makes an absolute report. + + Absolute reports are useful for experiments that don't compare + revisions. + + The report is written to the experiment evaluation directory. + + All *kwargs* will be passed to the AbsoluteReport class. If the + keyword argument *attributes* is not specified, a default list + of attributes is used. :: + + exp.add_absolute_report_step(attributes=["coverage"]) + + """ + kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) + report = AbsoluteReport(**kwargs) + outfile = os.path.join( + self.eval_dir, + get_experiment_name() + "." + report.output_format) + self.add_report(report, outfile=outfile) + self.add_step( + 'publish-absolute-report', subprocess.call, ['publish', outfile]) + + def add_comparison_table_step(self, **kwargs): + """Add a step that makes pairwise revision comparisons. + + Create comparative reports for all pairs of Fast Downward + revisions. Each report pairs up the runs of the same config and + lists the two absolute attribute values and their difference + for all attributes in kwargs["attributes"]. + + All *kwargs* will be passed to the CompareConfigsReport class. + If the keyword argument *attributes* is not specified, a + default list of attributes is used. 
:: + + exp.add_comparison_table_step(attributes=["coverage"]) + + """ + kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) + + def make_comparison_tables(): + for rev1, rev2 in itertools.combinations(self._revisions, 2): + compared_configs = [] + for config in self._configs: + config_nick = config.nick + compared_configs.append( + ("%s-%s" % (rev1, config_nick), + "%s-%s" % (rev2, config_nick), + "Diff (%s)" % config_nick)) + report = ComparativeReport(compared_configs, **kwargs) + outfile = os.path.join( + self.eval_dir, + "%s-%s-%s-compare.%s" % ( + self.name, rev1, rev2, report.output_format)) + report(self.eval_dir, outfile) + + def publish_comparison_tables(): + for rev1, rev2 in itertools.combinations(self._revisions, 2): + outfile = os.path.join( + self.eval_dir, + "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) + subprocess.call(["publish", outfile]) + + self.add_step("make-comparison-tables", make_comparison_tables) + self.add_step( + "publish-comparison-tables", publish_comparison_tables) + + def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): + """Add step creating (relative) scatter plots for all revision pairs. + + Create a scatter plot for each combination of attribute, + configuration and revisions pair. If *attributes* is not + specified, a list of common scatter plot attributes is used. + For portfolios all attributes except "cost", "coverage" and + "plan_length" will be ignored. :: + + exp.add_scatter_plot_step(attributes=["expansions"]) + + """ + if relative: + scatter_dir = os.path.join(self.eval_dir, "scatter-relative") + step_name = "make-relative-scatter-plots" + else: + scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") + step_name = "make-absolute-scatter-plots" + if attributes is None: + attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES + + def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): + name = "-".join([self.name, rev1, rev2, attribute, config_nick]) + if config_nick2 is not None: + name += "-" + config_nick2 + print("Make scatter plot for", name) + algo1 = get_algo_nick(rev1, config_nick) + algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) + report = ScatterPlotReport( + filter_algorithm=[algo1, algo2], + attributes=[attribute], + relative=relative, + get_category=lambda run1, run2: run1["domain"]) + report( + self.eval_dir, + os.path.join(scatter_dir, rev1 + "-" + rev2, name)) + + def make_scatter_plots(): + for config in self._configs: + print(config) + for rev1, rev2 in itertools.combinations(self._revisions, 2): + print(rev1, rev2) + for attribute in self.get_supported_attributes( + config.nick, attributes): + print(attribute) + make_scatter_plot(config.nick, rev1, rev2, attribute) + for nick1, nick2, rev1, rev2, attribute in additional: + make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) + + self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue1000/optimal.py b/experiments/issue1000/optimal.py new file mode 100755 index 0000000000..b0e1f9d702 --- /dev/null +++ b/experiments/issue1000/optimal.py @@ -0,0 +1,69 @@ +#! 
/usr/bin/env python +# -*- coding: utf-8 -*- + + +import os + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment + +import common_setup +from common_setup import IssueConfig, IssueExperiment + + +def make_comparison_table(): + report = common_setup.ComparativeReport( + algorithm_pairs=[ + ("issue1000-base-seq-opt-bjolp", "issue1000-v11-seq-opt-bjolp"), + ("issue1000-base-seq-opt-bjolp", "issue1000-v12-seq-opt-bjolp"), + ("issue1000-base-seq-opt-bjolp", "issue1000-v13-seq-opt-bjolp"), + ], attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, + ) + outfile = os.path.join( + exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) + ) + report(exp.eval_dir, outfile) + + exp.add_report(report) + + +DIR = os.path.dirname(os.path.abspath(__file__)) +SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REVISIONS = ["issue1000-base", "issue1000-v11", "issue1000-v12", + "issue1000-v13"] + +CONFIGS = [ + IssueConfig("seq-opt-bjolp", [], + driver_options=["--alias", "seq-opt-bjolp"]), +] + +SUITE = common_setup.DEFAULT_OPTIMAL_SUITE +ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_2", + email="clemens.buechner@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"], +) + +if common_setup.is_test_run(): + SUITE = IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=2) + +exp = common_setup.IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) + +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser(exp.SINGLE_SEARCH_PARSER) + +exp.add_step("build", exp.build) +exp.add_step("start", exp.start_runs) +exp.add_fetcher(name="fetch") +exp.add_step("comparison table", make_comparison_table) + +exp.run_steps() + diff --git a/experiments/issue1000/requirements.txt b/experiments/issue1000/requirements.txt new file mode 100644 index 0000000000..d201a74510 --- /dev/null +++ b/experiments/issue1000/requirements.txt @@ -0,0 +1,11 @@ +cycler==0.10.0 +kiwisolver==1.3.1 +lab==6.2 +matplotlib==3.3.3 +numpy==1.19.5 +Pillow==8.1.0 +pyparsing==2.4.7 +python-dateutil==2.8.1 +simplejson==3.17.2 +six==1.15.0 +txt2tags==3.7 diff --git a/experiments/issue1000/satisficing.py b/experiments/issue1000/satisficing.py new file mode 100755 index 0000000000..dc64a5349f --- /dev/null +++ b/experiments/issue1000/satisficing.py @@ -0,0 +1,70 @@ +#! 
/usr/bin/env python +# -*- coding: utf-8 -*- + + +import os + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment + +import common_setup +from common_setup import IssueConfig, IssueExperiment + + +def make_comparison_table(): + report = common_setup.ComparativeReport( + algorithm_pairs=[ + ("issue1000-base-lama-first", "issue1000-v11-lama-first"), + ("issue1000-base-lama-first", "issue1000-v12-lama-first"), + ("issue1000-base-lama-first", "issue1000-v13-lama-first"), + ], attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, + ) + outfile = os.path.join( + exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) + ) + report(exp.eval_dir, outfile) + + exp.add_report(report) + + +DIR = os.path.dirname(os.path.abspath(__file__)) +SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REVISIONS = ["issue1000-base", "issue1000-v11", "issue1000-v12", + "issue1000-v13"] + +CONFIGS = [ + IssueConfig("lama-first", [], + driver_options=["--alias", "lama-first"]), +] + +SUITE = common_setup.DEFAULT_SATISFICING_SUITE +ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_2", + email="clemens.buechner@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"], +) + +if common_setup.is_test_run(): + SUITE = IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=2) + +exp = common_setup.IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) + +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.ANYTIME_SEARCH_PARSER) +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser(exp.SINGLE_SEARCH_PARSER) + +exp.add_step("build", exp.build) +exp.add_step("start", exp.start_runs) +exp.add_fetcher(name="fetch") +exp.add_step("comparison table", make_comparison_table) + +exp.run_steps() + diff --git a/experiments/issue1000/v14-optimal.py b/experiments/issue1000/v14-optimal.py new file mode 100755 index 0000000000..8eea775de3 --- /dev/null +++ b/experiments/issue1000/v14-optimal.py @@ -0,0 +1,52 @@ +#! 
/usr/bin/env python +# -*- coding: utf-8 -*- + + +import os + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment + +import common_setup +from common_setup import IssueConfig, IssueExperiment + + +DIR = os.path.dirname(os.path.abspath(__file__)) +SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REVISIONS = ["issue1000-base", "issue1000-v14"] + +CONFIGS = [ + IssueConfig("seq-opt-bjolp", [], + driver_options=["--alias", "seq-opt-bjolp"]), +] + +SUITE = common_setup.DEFAULT_OPTIMAL_SUITE +ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_2", + email="clemens.buechner@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"], +) + +if common_setup.is_test_run(): + SUITE = IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=2) + +exp = common_setup.IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) + +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser(exp.SINGLE_SEARCH_PARSER) + +exp.add_step("build", exp.build) +exp.add_step("start", exp.start_runs) +exp.add_fetcher(name="fetch") +exp.add_comparison_table_step() + +exp.run_steps() + diff --git a/experiments/issue1000/v14-satisficing.py b/experiments/issue1000/v14-satisficing.py new file mode 100755 index 0000000000..2caee9d733 --- /dev/null +++ b/experiments/issue1000/v14-satisficing.py @@ -0,0 +1,53 @@ +#! /usr/bin/env python +# -*- coding: utf-8 -*- + + +import os + +from lab.environments import LocalEnvironment, BaselSlurmEnvironment + +import common_setup +from common_setup import IssueConfig, IssueExperiment + + +DIR = os.path.dirname(os.path.abspath(__file__)) +SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] +BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] +REVISIONS = ["issue1000-base", "issue1000-v14"] + +CONFIGS = [ + IssueConfig("lama-first", [], + driver_options=["--alias", "lama-first"]), +] + +SUITE = common_setup.DEFAULT_SATISFICING_SUITE +ENVIRONMENT = BaselSlurmEnvironment( + partition="infai_2", + email="clemens.buechner@unibas.ch", + export=["PATH", "DOWNWARD_BENCHMARKS"], +) + +if common_setup.is_test_run(): + SUITE = IssueExperiment.DEFAULT_TEST_SUITE + ENVIRONMENT = LocalEnvironment(processes=2) + +exp = common_setup.IssueExperiment( + revisions=REVISIONS, + configs=CONFIGS, + environment=ENVIRONMENT, +) + +exp.add_suite(BENCHMARKS_DIR, SUITE) + +exp.add_parser(exp.ANYTIME_SEARCH_PARSER) +exp.add_parser(exp.EXITCODE_PARSER) +exp.add_parser(exp.PLANNER_PARSER) +exp.add_parser(exp.SINGLE_SEARCH_PARSER) + +exp.add_step("build", exp.build) +exp.add_step("start", exp.start_runs) +exp.add_fetcher(name="fetch") +exp.add_comparison_table_step() + +exp.run_steps() + diff --git a/misc/autodoc/autodoc.py b/misc/autodoc/autodoc.py index c6a4a70cb5..7084045b6a 100755 --- a/misc/autodoc/autodoc.py +++ b/misc/autodoc/autodoc.py @@ -125,7 +125,7 @@ def make_doc_link(m): return text def build_planner(build): - subprocess.check_call(["./build.py", build, "downward"], cwd=REPO_ROOT_DIR) + subprocess.check_call([sys.executable, "build.py", build, "downward"], cwd=REPO_ROOT_DIR) def get_pages_from_planner(build): out = subprocess.check_output( diff --git a/misc/tests/configs.py b/misc/tests/configs.py index 934ee87550..4dbd354622 100644 --- a/misc/tests/configs.py +++ b/misc/tests/configs.py @@ -126,11 +126,6 @@ def configs_satisficing_core(): def configs_optimal_extended(): return { - 
"astar_lmcount_lm_merged_rhw_hm_no_order": [ - "--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", - "--search", - "astar(lmc,lazy_evaluator=lmc)"], "astar_cegar": [ "--search", "astar(cegar())"], @@ -202,10 +197,10 @@ def configs_satisficing_extended(): } -def configs_optimal_lp(): +def configs_optimal_lp(lp_solver="CPLEX"): return { - "divpot": ["--search", "astar(diverse_potentials())"], - "seq+lmcut": ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"], + "divpot": ["--search", f"astar(diverse_potentials(lpsolver={lp_solver}))"], + "seq+lmcut": ["--search", f"astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver={lp_solver}))"], } diff --git a/misc/tests/test-standard-configs.py b/misc/tests/test-standard-configs.py index 503dba55ce..f8cb87020b 100644 --- a/misc/tests/test-standard-configs.py +++ b/misc/tests/test-standard-configs.py @@ -15,9 +15,9 @@ PLAN_FILE = os.path.join(REPO, "test.plan") TASK = os.path.join(BENCHMARKS_DIR, "miconic/s1-0.pddl") -CONFIGS = {} -CONFIGS.update(configs.default_configs_optimal(core=True, extended=True)) -CONFIGS.update(configs.default_configs_satisficing(core=True, extended=True)) +CONFIGS_NOLP = {} +CONFIGS_NOLP.update(configs.default_configs_optimal(core=True, extended=True)) +CONFIGS_NOLP.update(configs.default_configs_satisficing(core=True, extended=True)) def escape_list(l): @@ -52,9 +52,21 @@ def setup_module(module): translate(TASK) -@pytest.mark.parametrize("config", sorted(CONFIGS.values())) +@pytest.mark.parametrize("config", sorted(CONFIGS_NOLP.values())) @pytest.mark.parametrize("debug", [False, True]) -def test_configs(config, debug): +def test_configs_nolp(config, debug): + run_plan_script(SAS_FILE, config, debug) + + +@pytest.mark.parametrize("config", sorted(configs.configs_optimal_lp(lp_solver="CPLEX").values())) +@pytest.mark.parametrize("debug", [False, True]) +def test_configs_cplex(config, debug): + run_plan_script(SAS_FILE, config, debug) + + +@pytest.mark.parametrize("config", sorted(configs.configs_optimal_lp(lp_solver="SOPLEX").values())) +@pytest.mark.parametrize("debug", [False, True]) +def test_configs_soplex(config, debug): run_plan_script(SAS_FILE, config, debug) diff --git a/misc/tests/test-translator.py b/misc/tests/test-translator.py index caabf260eb..52a5231563 100755 --- a/misc/tests/test-translator.py +++ b/misc/tests/test-translator.py @@ -3,7 +3,6 @@ HELP = """\ Check that translator is deterministic. - Run the translator multiple times to test that the log and the output file are the same for every run. Obviously, there might be false negatives, i.e., different runs might lead to the same nondeterministic results. 
diff --git a/misc/tests/test-translator.py b/misc/tests/test-translator.py
index caabf260eb..52a5231563 100755
--- a/misc/tests/test-translator.py
+++ b/misc/tests/test-translator.py
@@ -3,7 +3,6 @@
 HELP = """\
 Check that translator is deterministic.
-
 Run the translator multiple times to test that the log and the output
 file are the same for every run. Obviously, there might be false
 negatives, i.e., different runs might lead to the same nondeterministic
 results.
@@ -11,17 +10,17 @@
 import argparse
 from collections import defaultdict
-from distutils.spawn import find_executable
 import itertools
 import os
+from pathlib import Path
 import re
 import subprocess
 import sys
 
-DIR = os.path.dirname(os.path.abspath(__file__))
-REPO = os.path.dirname(os.path.dirname(DIR))
-DRIVER = os.path.join(REPO, "fast-downward.py")
+DIR = Path(__file__).resolve().parent
+REPO = DIR.parents[1]
+DRIVER = REPO / "fast-downward.py"
 
 
 def parse_args():
@@ -34,25 +33,27 @@ def parse_args():
         help='Use "all" to test all benchmarks, '
         '"first" to test the first task of each domain (default), '
         'or ":" to test individual tasks')
+    parser.add_argument(
+        "--runs-per-task",
+        help="translate each task this many times and compare the outputs",
+        type=int, default=3)
     args = parser.parse_args()
-    args.benchmarks_dir = os.path.abspath(args.benchmarks_dir)
+    args.benchmarks_dir = Path(args.benchmarks_dir).resolve()
     return args
 
 
 def get_task_name(path):
-    return "-".join(path.split("/")[-2:])
+    return "-".join(Path(path).parts[-2:])
 
 
 def translate_task(task_file):
-    python = sys.executable
-    print("Translate {} with {}".format(get_task_name(task_file), python))
-    sys.stdout.flush()
+    print(f"Translate {get_task_name(task_file)}", flush=True)
-    cmd = [python, DRIVER, "--translate", task_file]
+    cmd = [sys.executable, str(DRIVER), "--translate", str(task_file)]
     try:
-        output = subprocess.check_output(cmd)
+        output = subprocess.check_output(cmd, encoding=sys.getfilesystemencoding())
     except OSError as err:
-        sys.exit("Call failed: {}\n{}".format(" ".join(cmd), err))
+        sys.exit(f"Call failed: {' '.join(cmd)}\n{err}")
-    output = str(output)
     # Remove information that may differ between calls.
     for pattern in [
             r"\[.+s CPU, .+s wall-clock\]",
@@ -68,22 +69,23 @@ def _get_all_tasks_by_domain(benchmarks_dir):
     # seems to be detrimental on some other domains.
     blacklisted_domains = [
         "agricola-sat18-strips",
-        "citycar-opt14-adl",  # cf. issue875
-        "citycar-sat14-adl",  # cf. issue875
+        "citycar-opt14-adl",  # cf. issue879
+        "citycar-sat14-adl",  # cf. issue879
         "organic-synthesis-sat18-strips",
         "organic-synthesis-split-opt18-strips",
         "organic-synthesis-split-sat18-strips"]
+    benchmarks_dir = Path(benchmarks_dir)
     tasks = defaultdict(list)
     domains = [
-        name for name in os.listdir(benchmarks_dir)
-        if os.path.isdir(os.path.join(benchmarks_dir, name)) and
-        not name.startswith((".", "_")) and
-        name not in blacklisted_domains]
+        domain_dir for domain_dir in benchmarks_dir.iterdir()
+        if domain_dir.is_dir() and
+        not domain_dir.name.startswith((".", "_", "unofficial")) and
+        domain_dir.name not in blacklisted_domains]
     for domain in domains:
-        path = os.path.join(benchmarks_dir, domain)
+        path = benchmarks_dir / domain
         tasks[domain] = [
-            os.path.join(benchmarks_dir, domain, f)
-            for f in sorted(os.listdir(path)) if "domain" not in f]
+            f for f in sorted(path.iterdir()) if "domain" not in f.name]
     return sorted(tasks.values())
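Before two translator runs are diffed, translate_task() scrubs run-dependent details with the pattern list above; otherwise the timing information alone would make every comparison fail. A standalone illustration of the scrubbing step (the sample log line is made up):

import re

output = "Done! [0.42s CPU, 0.45s wall-clock]"  # made-up sample log line
for pattern in [r"\[.+s CPU, .+s wall-clock\]"]:  # first pattern from the list above
    output = re.sub(pattern, "", output)
print(output)  # "Done! " -- timing stripped, stable across runs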
@@ -100,15 +102,13 @@ def get_tasks(args):
         else:
             # Add task from command line.
             task = task.replace(":", "/")
-            suite.append(os.path.join(args.benchmarks_dir, task))
+            suite.append(args.benchmarks_dir / task)
     return sorted(set(suite))
 
 
 def cleanup():
-    # We can't use the driver's cleanup function since the files are renamed.
-    for f in os.listdir("."):
-        if f.endswith(".sas"):
-            os.remove(f)
+    for f in Path(".").glob("translator-output-*.txt"):
+        f.unlink()
 
 
 def write_combined_output(output_file, task):
@@ -123,20 +123,19 @@ def main():
     args = parse_args()
     os.chdir(DIR)
     cleanup()
-    subprocess.check_call([sys.executable, "build.py", "translate"], cwd=REPO)
     for task in get_tasks(args):
-        write_combined_output("base.sas", task)
-        for iteration in range(2):
-            write_combined_output("output{}.sas".format(iteration), task)
-            print("Compare translator output", flush=True)
-            files = ["base.sas", "output{}.sas".format(iteration)]
+        base_file = "translator-output-0.txt"
+        write_combined_output(base_file, task)
+        for i in range(1, args.runs_per_task):
+            compared_file = f"translator-output-{i}.txt"
+            write_combined_output(compared_file, task)
+            files = [base_file, compared_file]
             try:
                 subprocess.check_call(["diff", "-q"] + files)
             except subprocess.CalledProcessError:
-                sys.exit(
-                    "Error: Translator is nondeterministic for {task}.".format(**locals()))
-            print(flush=True)
-    cleanup()
+                sys.exit(f"Error: Translator is nondeterministic for {task}.")
+            print("Outputs match\n", flush=True)
+        cleanup()
 
 
 if __name__ == "__main__":
diff --git a/misc/tox.ini b/misc/tox.ini
index ae07969239..dd515e524e 100644
--- a/misc/tox.ini
+++ b/misc/tox.ini
@@ -1,23 +1,31 @@
 # Note that we can't run fast-downward.py from within the misc/
 # directory because the driver confuses the misc/release with the
 # builds/release directory.
+# All tests (except for 'build' and 'autodoc') assume that Fast
+# Downward is already built. For the translator tests it is sufficient
+# to build the 'translate' configuration.
+
 [tox]
-envlist = build, py36, py37, py38, translator, search, valgrind, clang-tidy, style
+envlist = build, driver, translator, search, style, autodoc, clang-tidy
 basepython = python3
 skip_missing_interpreters = true
 skipsdist = true
 
-[testenv]
+[testenv:autodoc]
 changedir = {toxinidir}/../
-deps =
-    pytest
 commands =
-    pytest driver/tests.py misc/tests/test-exitcodes.py
     bash -c "python3 misc/autodoc/autodoc.py --dry-run > /dev/null"
 whitelist_externals = bash
 
+[testenv:driver]
+changedir = {toxinidir}/../
+deps =
+    pytest
+commands =
+    pytest driver/tests.py misc/tests/test-exitcodes.py
+
 [testenv:build]
 changedir = {toxinidir}/../
 passenv =
@@ -39,7 +47,21 @@ changedir = {toxinidir}/tests/
 deps =
     pytest
 commands =
-    pytest test-standard-configs.py
+    pytest test-standard-configs.py -k test_configs_nolp
+
+[testenv:cplex]
+changedir = {toxinidir}/tests/
+deps =
+    pytest
+commands =
+    pytest test-standard-configs.py -k test_configs_cplex
+
+[testenv:soplex]
+changedir = {toxinidir}/tests/
+deps =
+    pytest
+commands =
+    pytest test-standard-configs.py -k test_configs_soplex
 
 [testenv:valgrind]
 changedir = {toxinidir}/tests/
diff --git a/src/search/CMakeLists.txt b/src/search/CMakeLists.txt
index 29f1bdd597..9dc1ead9b8 100644
--- a/src/search/CMakeLists.txt
+++ b/src/search/CMakeLists.txt
@@ -1,4 +1,6 @@
 cmake_minimum_required(VERSION 2.8.3)
+# On Windows we require CMake 3.12, but this is currently not
+# available on Ubuntu 18.04.
 
 if(NOT FAST_DOWNWARD_MAIN_CMAKELISTS_READ)
     message(
@@ -40,6 +42,7 @@ endif()
 
 # On Windows, find the psapi library for determining peak memory.
 if(WIN32)
+    cmake_policy(SET CMP0074 NEW)
     target_link_libraries(downward psapi)
 endif()
diff --git a/src/search/landmarks/landmark_count_heuristic.cc b/src/search/landmarks/landmark_count_heuristic.cc
index af94fda5df..c8479dd7f4 100644
--- a/src/search/landmarks/landmark_count_heuristic.cc
+++ b/src/search/landmarks/landmark_count_heuristic.cc
@@ -118,15 +118,11 @@ int LandmarkCountHeuristic::get_heuristic_value(const State &ancestor_state) {
         return static_cast<int>(ceil(h_val - epsilon));
     } else {
         int h = 0;
-        for (auto &lm : lgraph->get_nodes()) {
-            switch (lm_status_manager->get_landmark_status(
-                        lm->get_id())) {
-            case lm_reached:
-                break;
-            case lm_not_reached:
-            case lm_needed_again:
-                h += lm->cost;
-                break;
+        for (int id = 0; id < lgraph->get_num_landmarks(); ++id) {
+            landmark_status status =
+                lm_status_manager->get_landmark_status(id);
+            if (status == lm_not_reached || status == lm_needed_again) {
+                h += lgraph->get_landmark(id)->cost;
             }
         }
         return h;
diff --git a/src/search/landmarks/landmark_factory_rpg_sasp.h b/src/search/landmarks/landmark_factory_rpg_sasp.h
index 5dee045258..9f48d3321b 100644
--- a/src/search/landmarks/landmark_factory_rpg_sasp.h
+++ b/src/search/landmarks/landmark_factory_rpg_sasp.h
@@ -12,7 +12,7 @@ class LandmarkFactoryRpgSasp : public LandmarkFactoryRelaxation {
     std::list<LandmarkNode *> open_landmarks;
     std::vector<std::vector<int>> disjunction_classes;
 
-    std::map<LandmarkNode *, utils::HashSet<FactPair>> forward_orders;
+    std::unordered_map<LandmarkNode *, utils::HashSet<FactPair>> forward_orders;
 
     // dtg_successors[var_id][val] contains all successor values of val in the
     // domain transition graph for the variable
diff --git a/src/search/landmarks/landmark_status_manager.cc b/src/search/landmarks/landmark_status_manager.cc
index 8d65b4d9e7..7e78c3efe8 100644
--- a/src/search/landmarks/landmark_status_manager.cc
+++ b/src/search/landmarks/landmark_status_manager.cc
@@ -1,7 +1,5 @@
 #include "landmark_status_manager.h"
 
-#include "landmark_graph.h"
-
 #include "../utils/logging.h"
 
 using namespace std;
@@ -17,12 +15,6 @@ LandmarkStatusManager::LandmarkStatusManager(LandmarkGraph &graph)
       lm_graph(graph) {
 }
 
-landmark_status LandmarkStatusManager::get_landmark_status(
-    size_t id) const {
-    assert(static_cast<int>(id) < lm_graph.get_num_landmarks());
-    return lm_status[id];
-}
-
 BitsetView LandmarkStatusManager::get_reached_landmarks(const State &state) {
     return reached_lms[state];
 }
@@ -115,15 +107,13 @@ bool LandmarkStatusManager::update_reached_lms(const State &parent_ancestor_stat
 
 void LandmarkStatusManager::update_lm_status(const State &ancestor_state) {
     const BitsetView reached = get_reached_landmarks(ancestor_state);
-    const LandmarkGraph::Nodes &nodes = lm_graph.get_nodes();
-
+    const int num_landmarks = lm_graph.get_num_landmarks();
     /*
       This first loop is necessary as setup for the *needed again*
      check in the second loop.
    */
-    for (int id = 0; id < lm_graph.get_num_landmarks(); ++id) {
+    for (int id = 0; id < num_landmarks; ++id) {
         lm_status[id] = reached.test(id) ?
                            lm_reached : lm_not_reached;
     }
 
-    for (auto &node : nodes) {
-        int id = node->get_id();
+    for (int id = 0; id < num_landmarks; ++id) {
         if (lm_status[id] == lm_reached &&
             landmark_needed_again(id, ancestor_state)) {
             lm_status[id] = lm_needed_again;
diff --git a/src/search/landmarks/landmark_status_manager.h b/src/search/landmarks/landmark_status_manager.h
index ff5177f0e2..15368b5605 100644
--- a/src/search/landmarks/landmark_status_manager.h
+++ b/src/search/landmarks/landmark_status_manager.h
@@ -1,6 +1,8 @@
 #ifndef LANDMARKS_LANDMARK_STATUS_MANAGER_H
 #define LANDMARKS_LANDMARK_STATUS_MANAGER_H
 
+#include "landmark_graph.h"
+
 #include "../per_state_bitset.h"
 
 namespace landmarks {
@@ -44,7 +46,10 @@ class LandmarkStatusManager {
       desired state is returned at all times, or an error is thrown if
       the desired information does not exist.
     */
-    landmark_status get_landmark_status(size_t id) const;
+    landmark_status get_landmark_status(size_t id) const {
+        assert(static_cast<int>(id) < lm_graph.get_num_landmarks());
+        return lm_status[id];
+    }
 };
 }
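For readers skimming the C++ changes: update_lm_status() now makes two passes over landmark IDs, and the heuristic then sums the costs of all landmarks that are not reached or are needed again. A toy Python model of that logic (all names hypothetical, for illustration only):

LM_REACHED, LM_NOT_REACHED, LM_NEEDED_AGAIN = range(3)

def update_lm_status(reached, needed_again, num_landmarks):
    # First pass: seed every status from the reached set (the C++ code
    # reads a per-state bitset here).
    status = [LM_REACHED if lm_id in reached else LM_NOT_REACHED
              for lm_id in range(num_landmarks)]
    # Second pass: demote reached landmarks that are needed again.
    for lm_id in range(num_landmarks):
        if status[lm_id] == LM_REACHED and needed_again(lm_id):
            status[lm_id] = LM_NEEDED_AGAIN
    return status

def heuristic_value(status, costs):
    # Mirrors the rewritten loop in LandmarkCountHeuristic: sum the costs
    # of the landmarks that still have to be achieved.
    return sum(costs[lm_id] for lm_id, s in enumerate(status)
               if s in (LM_NOT_REACHED, LM_NEEDED_AGAIN))

# Example: landmark 0 reached, landmark 1 reached but needed again,
# landmark 2 not reached; with unit costs this gives h = 2.
status = update_lm_status({0, 1}, lambda lm_id: lm_id == 1, 3)
assert heuristic_value(status, [1, 1, 1]) == 2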