diff --git a/.github/workflows/interface-unit-tests.yml b/.github/workflows/interface-unit-tests.yml index 601e762fc92..e08464c2c22 100644 --- a/.github/workflows/interface-unit-tests.yml +++ b/.github/workflows/interface-unit-tests.yml @@ -47,6 +47,14 @@ on: required: false type: boolean default: false + python_warning_level: + description: Sets the default Python warning level as defined by https://docs.python.org/3/using/cmdline.html#envvar-PYTHONWARNINGS + required: false + type: string + default: 'default' +env: + ACTIONS_RUNNER_DEBUG: true + ACTIONS_STEP_DEBUG: true jobs: setup-ci-load: @@ -66,6 +74,22 @@ jobs: "default": ["3.10"] } EOF + elif [ "${{ inputs.python_warning_level }}" == "error" ]; + then + cat >python_versions.json <<-EOF + { + "default": ["3.10"], + "torch-tests": ["3.10"], + "tf-tests": ["3.10"], + "jax-tests": ["3.10"], + "all-interfaces-tests": ["3.10"], + "external-libraries-tests": ["3.10"], + "qcut-tests": ["3.10"], + "qchem-tests": ["3.10"], + "gradients-tests": ["3.10"], + "data-tests": ["3.10"], + "device-tests": ["3.10"] } + EOF else cat >python_versions.json <<-EOF { @@ -102,6 +126,13 @@ jobs: "device-tests": 2 } EOF + elif [ "${{ inputs.python_warning_level }}" == "error" ]; + then + cat >matrix_max_parallel.json <<-EOF + { + "default": 1 + } + EOF else cat >matrix_max_parallel.json <<-EOF { @@ -118,6 +149,18 @@ jobs: jq . 
matrix_max_parallel.json echo "matrix_max_parallel=$(jq -r tostring matrix_max_parallel.json)" >> $GITHUB_OUTPUT + - name: Enable splitting of tests + id: job_split + run: | + if [ "${{ inputs.python_warning_level }}" == "error" ]; + then + export ENABLE_SPLIT='0' + else + export ENABLE_SPLIT='1' + fi + + echo "enable_split=$ENABLE_SPLIT" >> $GITHUB_OUTPUT + - name: Setup Job to Skip id: jobs_to_skip env: @@ -136,6 +179,7 @@ jobs: matrix-max-parallel: ${{ steps.max_parallel.outputs.matrix_max_parallel }} python-version: ${{ steps.python_versions.outputs.python_versions }} jobs-to-skip: ${{ steps.jobs_to_skip.outputs.jobs_to_skip }} + enable-split: ${{ steps.job_split.outputs.enable_split }} torch-tests: needs: @@ -168,6 +212,7 @@ jobs: pytest_markers: torch and not qcut and not finite-diff and not param-shift requirements_file: ${{ github.event_name == 'schedule' && strategy.job-index == 0 && 'torch.txt' || '' }} disable_new_opmath: ${{ inputs.disable_new_opmath }} + python_warning_level: ${{ inputs.python_warning_level }} autograd-tests: @@ -200,6 +245,7 @@ jobs: pytest_coverage_flags: ${{ inputs.pytest_coverage_flags }} pytest_markers: autograd and not qcut and not finite-diff and not param-shift disable_new_opmath: ${{ inputs.disable_new_opmath }} + python_warning_level: ${{ inputs.python_warning_level }} tf-tests: @@ -212,7 +258,8 @@ jobs: || fromJSON(needs.setup-ci-load.outputs.matrix-max-parallel).default }} matrix: - group: [1, 2, 3] + group: >- + ${{ needs.setup-ci-load.outputs.enable-split == '1' && fromJSON('[1, 2, 3]') || fromJSON('[1]') }} python-version: >- ${{ fromJSON(needs.setup-ci-load.outputs.python-version).tf-tests @@ -232,12 +279,13 @@ jobs: install_pennylane_lightning_master: true pytest_coverage_flags: ${{ inputs.pytest_coverage_flags }} pytest_markers: tf and not qcut and not finite-diff and not param-shift - pytest_additional_args: --splits 3 --group ${{ matrix.group }} + pytest_additional_args: ${{ 
needs.setup-ci-load.outputs.enable-split == '1' && format('--splits {0} --group {1}', '3', matrix.group) || '' }} pytest_durations_file_path: '.github/workflows/tf_tests_durations.json' pytest_store_durations: ${{ inputs.pytest_store_durations }} additional_pip_packages: pytest-split requirements_file: ${{ github.event_name == 'schedule' && strategy.job-index == 0 && 'tf.txt' || '' }} disable_new_opmath: ${{ inputs.disable_new_opmath }} + python_warning_level: ${{ inputs.python_warning_level }} jax-tests: @@ -250,7 +298,7 @@ jobs: || fromJSON(needs.setup-ci-load.outputs.matrix-max-parallel).default }} matrix: - group: [1, 2, 3, 4, 5] + group: ${{ needs.setup-ci-load.outputs.enable-split == '1' && fromJSON('[1, 2, 3, 4, 5]') || fromJSON('[1]') }} python-version: >- ${{ fromJSON(needs.setup-ci-load.outputs.python-version).jax-tests @@ -270,12 +318,13 @@ jobs: install_pennylane_lightning_master: true pytest_coverage_flags: ${{ inputs.pytest_coverage_flags }} pytest_markers: jax and not qcut and not finite-diff and not param-shift - pytest_additional_args: --splits 5 --group ${{ matrix.group }} - pytest_durations_file_path: '.github/workflows/jax_tests_durations.json' + pytest_additional_args: ${{ needs.setup-ci-load.outputs.enable-split == '1' && format('--splits {0} --group {1}', '5', matrix.group) || '' }} + pytest_durations_file_path: .github/workflows/jax_tests_durations.json pytest_store_durations: ${{ inputs.pytest_store_durations }} additional_pip_packages: pytest-split requirements_file: ${{ github.event_name == 'schedule' && strategy.job-index == 0 && 'jax.txt' || '' }} disable_new_opmath: ${{ inputs.disable_new_opmath }} + python_warning_level: ${{ inputs.python_warning_level }} core-tests: @@ -288,7 +337,7 @@ jobs: || fromJSON(needs.setup-ci-load.outputs.matrix-max-parallel).default }} matrix: - group: [1, 2, 3, 4, 5] + group: ${{ needs.setup-ci-load.outputs.enable-split == '1' && fromJSON('[1, 2, 3, 4, 5]') || fromJSON('[1]') }} python-version: >- ${{ 
fromJSON(needs.setup-ci-load.outputs.python-version).core-tests @@ -308,12 +357,13 @@ jobs: install_pennylane_lightning_master: true pytest_coverage_flags: ${{ inputs.pytest_coverage_flags }} pytest_markers: core and not qcut and not finite-diff and not param-shift - pytest_additional_args: --splits 5 --group ${{ matrix.group }} - pytest_durations_file_path: '.github/workflows/core_tests_durations.json' + pytest_additional_args: ${{ needs.setup-ci-load.outputs.enable-split == '1' && format('--splits {0} --group {1}', '5', matrix.group ) || '' }} + pytest_durations_file_path: .github/workflows/core_tests_durations.json pytest_store_durations: ${{ inputs.pytest_store_durations }} additional_pip_packages: pytest-split requirements_file: ${{ github.event_name == 'schedule' && strategy.job-index == 0 && 'core.txt' || '' }} disable_new_opmath: ${{ inputs.disable_new_opmath }} + python_warning_level: ${{ inputs.python_warning_level }} all-interfaces-tests: @@ -347,6 +397,7 @@ jobs: pytest_markers: all_interfaces requirements_file: ${{ github.event_name == 'schedule' && strategy.job-index == 0 && 'all_interfaces.txt' || '' }} disable_new_opmath: ${{ inputs.disable_new_opmath }} + python_warning_level: ${{ inputs.python_warning_level }} external-libraries-tests: @@ -386,6 +437,7 @@ jobs: additional_pip_packages: pyzx matplotlib stim quimb mitiq pennylane-qiskit ply requirements_file: ${{ github.event_name == 'schedule' && strategy.job-index == 0 && 'external.txt' || '' }} disable_new_opmath: ${{ inputs.disable_new_opmath }} + python_warning_level: ${{ inputs.python_warning_level }} qcut-tests: @@ -419,6 +471,7 @@ jobs: pytest_markers: qcut additional_pip_packages: kahypar==1.1.7 opt_einsum disable_new_opmath: ${{ inputs.disable_new_opmath }} + python_warning_level: ${{ inputs.python_warning_level }} qchem-tests: @@ -452,6 +505,7 @@ jobs: pytest_markers: qchem additional_pip_packages: openfermionpyscf basis-set-exchange disable_new_opmath: ${{ inputs.disable_new_opmath }} + 
python_warning_level: ${{ inputs.python_warning_level }} gradients-tests: needs: @@ -486,6 +540,7 @@ jobs: pytest_coverage_flags: ${{ inputs.pytest_coverage_flags }} pytest_markers: ${{ matrix.config.suite }} disable_new_opmath: ${{ inputs.disable_new_opmath }} + python_warning_level: ${{ inputs.python_warning_level }} data-tests: @@ -519,6 +574,7 @@ jobs: pytest_markers: data additional_pip_packages: h5py disable_new_opmath: ${{ inputs.disable_new_opmath }} + python_warning_level: ${{ inputs.python_warning_level }} device-tests: @@ -559,6 +615,7 @@ jobs: pytest_coverage_flags: ${{ inputs.pytest_coverage_flags }} pytest_additional_args: --device=${{ matrix.config.device }} --shots=${{ matrix.config.shots }} disable_new_opmath: ${{ inputs.disable_new_opmath }} + python_warning_level: ${{ inputs.python_warning_level }} upload-to-codecov: diff --git a/.github/workflows/package_warnings_as_errors.yml b/.github/workflows/package_warnings_as_errors.yml new file mode 100644 index 00000000000..dd529e626dc --- /dev/null +++ b/.github/workflows/package_warnings_as_errors.yml @@ -0,0 +1,24 @@ +name: Test-suite with Python warnings as errors +on: + # Scheduled trigger every Sunday at 2:35am UTC + schedule: + - cron: '35 2 * * 0' + workflow_dispatch: + +concurrency: + group: warnings-as-errors-tests-${{ github.ref }} + cancel-in-progress: true + +jobs: + test-warnings-as-errors: + strategy: + fail-fast: false + max-parallel: 1 + secrets: + codecov_token: ${{ secrets.CODECOV_TOKEN }} + uses: ./.github/workflows/interface-unit-tests.yml + with: + branch: ${{ github.ref }} + python_warning_level: 'error' + pipeline_mode: 'warnings' + run_lightened_ci: false diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index 8e11cf86473..4631e03d441 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -25,7 +25,7 @@ on: type: string default: '3.10' pipeline_mode: - description: The pipeline mode can be unit-tests, benchmarks, or 
reference-benchmark + description: The pipeline mode can be unit-tests, benchmarks, reference-benchmark, or warnings required: false type: string default: 'unit-tests' @@ -114,6 +114,11 @@ on: required: false type: string default: "False" + python_warning_level: + description: Sets the default Python warning level as defined by https://docs.python.org/3/using/cmdline.html#envvar-PYTHONWARNINGS + required: false + type: string + default: 'default' jobs: test: @@ -129,7 +134,7 @@ jobs: repository: PennyLaneAI/pennylane - name: Determine benchmark name - if: ${{ inputs.pipeline_mode != 'unit-tests' }} + if: ${{ contains(fromJSON('["benchmarks"]'), inputs.pipeline_mode) }} id: benchmark_name run: | job_name="${{ inputs.job_name }}" @@ -138,7 +143,7 @@ jobs: echo "benchmark_name=$_benchmark_name" >> $GITHUB_OUTPUT - name: Cache reference benchmarks - if: ${{ inputs.pipeline_mode != 'unit-tests' }} + if: ${{ contains(fromJSON('["benchmarks"]'), inputs.pipeline_mode) }} id: benchmark-cache uses: actions/cache@v3 with: @@ -149,7 +154,7 @@ jobs: id: continue run: >- echo "confirm=${{ - contains(fromJSON('["unit-tests", "benchmarks"]'), inputs.pipeline_mode) + contains(fromJSON('["unit-tests", "benchmarks", "warnings"]'), inputs.pipeline_mode) || (inputs.pipeline_mode == 'reference-benchmarks' && steps.benchmark-cache.outputs.cache-hit != 'true' )}}" >> $GITHUB_OUTPUT @@ -180,6 +185,8 @@ jobs: run: | if [[ "$PIPELINE_MODE" =~ .*"benchmarks".* ]]; then echo "args=$PYTEST_BENCHMARKS_ARGS $PYTEST_ADDITIONAL_ARGS" >> $GITHUB_OUTPUT + elif [[ "$PIPELINE_MODE" =~ .*"warnings".* ]]; then + echo "args=$PYTEST_ADDITIONAL_ARGS --continue-on-collection-errors" >> $GITHUB_OUTPUT else echo "args=$PYTEST_COVERAGE_ARGS $PYTEST_PARALLELISE_ARGS $PYTEST_ADDITIONAL_ARGS $PYTEST_DURATIONS_ARGS $PYTEST_STORE_ARGS" >> $GITHUB_OUTPUT fi @@ -193,7 +200,7 @@ jobs: COV_CORE_DATAFILE: .coverage.eager TF_USE_LEGACY_KERAS: "1" # sets to use tf-keras (Keras2) instead of keras (Keras3) when running TF 
tests # Calling PyTest by invoking Python first as that adds the current directory to sys.path - run: python -m pytest ${{ inputs.pytest_test_directory }} ${{ steps.pytest_args.outputs.args }} ${{ env.PYTEST_MARKER }} --disable-opmath=${{ inputs.disable_new_opmath }} + run: python -m pytest -W ${{ inputs.python_warning_level }} ${{ inputs.pytest_test_directory }} ${{ steps.pytest_args.outputs.args }} ${{ env.PYTEST_MARKER }} --disable-opmath=${{ inputs.disable_new_opmath }} - name: Upload Durations file as artifact if: ${{ inputs.pytest_store_durations == true }} diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md index 7b9416d62c3..9d4a79c2e7b 100644 --- a/doc/releases/changelog-dev.md +++ b/doc/releases/changelog-dev.md @@ -91,6 +91,9 @@

Documentation 📝

+* Add reporting of test warnings as failures. + [(#6217)](https://github.com/PennyLaneAI/pennylane/pull/6217) +

Bug fixes 🐛

* Fix a bug where zero-valued JVPs were calculated wrongly in the presence of shot vectors. diff --git a/pennylane/devices/tests/pytest.ini b/pennylane/devices/tests/pytest.ini index ba9755e1ca8..7444d17af76 100644 --- a/pennylane/devices/tests/pytest.ini +++ b/pennylane/devices/tests/pytest.ini @@ -1,4 +1,4 @@ [pytest] markers = skip_unsupported: skip a test if it uses an operation unsupported on a device -addopts = --benchmark-disable \ No newline at end of file +addopts = --benchmark-disable diff --git a/requirements-ci.txt b/requirements-ci.txt index 083beaae25f..38b70736dbf 100644 --- a/requirements-ci.txt +++ b/requirements-ci.txt @@ -13,3 +13,4 @@ matplotlib requests rich tomli # Drop once minimum Python version is 3.11 +pandas diff --git a/tests/pytest.ini b/tests/pytest.ini index 1897c3c65e4..19811af01bf 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -19,12 +19,7 @@ markers = logging: marks tests for pennylane logging external: marks tests that require external packages such as matplotlib and PyZX catalyst: marks tests for catalyst testing (select with '-m "catalyst"') -filterwarnings = - ignore::DeprecationWarning:autograd.numpy.numpy_wrapper - ignore:Casting complex values to real::autograd.numpy.numpy_wrapper - ignore:Casting complex values to real discards the imaginary part:UserWarning:torch.autograd - ignore:Call to deprecated create function:DeprecationWarning - ignore:the imp module is deprecated:DeprecationWarning - error::pennylane.PennyLaneDeprecationWarning addopts = --benchmark-disable xfail_strict=true +filterwarnings = + error::pennylane.PennyLaneDeprecationWarning