diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 37e7fa9d97..7852e6aa7b 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -2,7 +2,7 @@ version: 2 updates: - package-ecosystem: gomod directory: "/" - open-pull-requests-limit: 4 + open-pull-requests-limit: 10 schedule: interval: weekly commit-message: @@ -38,9 +38,9 @@ updates: - "sigs.k8s.io/*" - package-ecosystem: github-actions directory: "/" - open-pull-requests-limit: 3 + open-pull-requests-limit: 5 schedule: - interval: daily + interval: monthly commit-message: prefix: "deps(github):" groups: @@ -54,13 +54,21 @@ updates: # Update pinned pip packages via requiements.txt - package-ecosystem: "pip" - schedule: weekly + schedule: + interval: weekly commit-message: prefix: "deps(docker,pip):" directories: - "/docker/build" - "/docker/postgres-kanister-tools" - "/docker/postgresql" + groups: + common-pip: + patterns: + - "pip" + - "setuptools" + - "wheel" + - "awscli" - package-ecosystem: "docker" schedule: @@ -96,5 +104,3 @@ updates: - "/docker/mssql-tools" - "/docker/postgresql" - "/docker/redis-tools" - - diff --git a/.github/workflows/atlas-image-build.yaml b/.github/workflows/atlas-image-build.yaml index fd4444de5d..ac878d5838 100644 --- a/.github/workflows/atlas-image-build.yaml +++ b/.github/workflows/atlas-image-build.yaml @@ -24,7 +24,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 - - uses: tj-actions/changed-files@e9772d140489982e0e3704fea5ee93d536f1e275 # v45.0.1 + - uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c # v45.0.2 name: Get changed files id: changed-files with: @@ -39,7 +39,7 @@ jobs: if: needs.check-files.outputs.changed == 'true' steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Image metadata id: meta uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 @@ -57,7 +57,7 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/build-push-action@15560696de535e4014efeff63c48f16952e52dd1 # v6.2.0 + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: "{{defaultContext}}:docker/mongodb-atlas" push: true diff --git a/.github/workflows/build_docker.yaml b/.github/workflows/build_docker.yaml index 6bffd52986..1619c54cfc 100644 --- a/.github/workflows/build_docker.yaml +++ b/.github/workflows/build_docker.yaml @@ -47,7 +47,7 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Login to GHCR uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: @@ -66,7 +66,7 @@ jobs: ${{ inputs.extra_tags }} labels: ${{ inputs.labels }} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: . 
file: ${{ inputs.image_file }} diff --git a/.github/workflows/dependendy-review.yml b/.github/workflows/dependendy-review.yml index 2166876db9..1bbf4279c4 100644 --- a/.github/workflows/dependendy-review.yml +++ b/.github/workflows/dependendy-review.yml @@ -18,4 +18,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: 'Dependency Review' - uses: actions/dependency-review-action@v4 + uses: actions/dependency-review-action@5a2ce3f5b92ee19cbb1541a4984c76d921601d7c # v4.3.4 diff --git a/.github/workflows/kanister-image-build.yaml b/.github/workflows/kanister-image-build.yaml index 2670aa9ebf..f7af83a801 100644 --- a/.github/workflows/kanister-image-build.yaml +++ b/.github/workflows/kanister-image-build.yaml @@ -54,7 +54,7 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Image metadata id: meta uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 @@ -73,7 +73,7 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/build-push-action@15560696de535e4014efeff63c48f16952e52dd1 # v6.2.0 + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: "{{defaultContext}}:docker/build" platforms: linux/amd64,linux/arm64 diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 7fdcd0bdbd..e5eae51aa3 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -75,7 +75,7 @@ jobs: run: echo "${{needs.gomod.outputs.gomod}}" > go.mod - name: restore_gosum run: echo "${{needs.gomod.outputs.gosum}}" > go.sum - - uses: helm/kind-action@v1.10.0 + - uses: helm/kind-action@0025e74a8c7512023d06dc019c617aa3cf561fde # v1.10.0 - run: | make install-csi-hostpath-driver make install-minio diff --git a/.github/workflows/ossf-scorecard.yml b/.github/workflows/ossf-scorecard.yml index c9f36922cd..89e8bc30a9 100644 --- a/.github/workflows/ossf-scorecard.yml +++ b/.github/workflows/ossf-scorecard.yml @@ -39,7 +39,7 @@ jobs: - # Upload the results to GitHub's code scanning dashboard. 
name: "Upload to results to dashboard" - uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + uses: github/codeql-action/upload-sarif@461ef6c76dfe95d5c364de2f431ddbd31a417628 # v3.26.9 with: sarif_file: results.sarif - diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 848f39cf59..c6224f299c 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -91,7 +91,7 @@ jobs: export HELM_RELEASE_REPO_INDEX=https://charts.kanister.io/ make package-helm VERSION=${RELEASE_TAG} - name: Free Disk Space (Ubuntu) - uses: jlumbroso/free-disk-space@main + uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1 - name: gorelease run: make gorelease env: @@ -102,7 +102,6 @@ jobs: run: echo "release_url=https://github.com/kanisterio/kanister/releases/tag/${RELEASE_TAG}" >> "$GITHUB_OUTPUT" build_docs: - runs-on: ubuntu-latest needs: [run_if, release_packages] permissions: contents: read diff --git a/.github/workflows/triage-issues.yaml b/.github/workflows/triage-issues.yaml index 59aea954a1..919aeb1281 100644 --- a/.github/workflows/triage-issues.yaml +++ b/.github/workflows/triage-issues.yaml @@ -19,13 +19,13 @@ jobs: steps: - name: Add label - uses: actions-ecosystem/action-add-labels@v1.1.3 + uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf # v1.1.3 with: labels: "triage" github_token: ${{ secrets.GITHUB_TOKEN }} - name: Add comment - uses: actions-ecosystem/action-create-comment@v1.0.2 + uses: actions-ecosystem/action-create-comment@e23bc59fbff7aac7f9044bd66c2dc0fe1286f80b # v1.0.2 if: github.event.action == 'opened' with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -37,7 +37,7 @@ jobs: If you haven't already, please take a moment to review our project's [Code of Conduct](https://github.com/kanisterio/kanister/blob/master/CODE_OF_CONDUCT.md) document. - name: Update project - uses: alex-page/github-project-automation-plus@v0.9.0 + uses: alex-page/github-project-automation-plus@303f24a24c67ce7adf565a07e96720faf126fe36 # v0.9.0 with: repo-token: ${{ secrets.GH_TOKEN }} # must use a PAT here project: Kanister diff --git a/.github/workflows/triage-prs.yaml b/.github/workflows/triage-prs.yaml index e03fae2960..3a9f2068b5 100644 --- a/.github/workflows/triage-prs.yaml +++ b/.github/workflows/triage-prs.yaml @@ -20,7 +20,7 @@ jobs: steps: - name: Comment - uses: actions-ecosystem/action-create-comment@v1.0.2 + uses: actions-ecosystem/action-create-comment@e23bc59fbff7aac7f9044bd66c2dc0fe1286f80b # v1.0.2 # Avoid adding a comment when the PR is on the same repo. if: github.event.action == 'opened' && github.event.pull_request.head.repo.fork with: @@ -31,7 +31,7 @@ jobs: If you haven't already, please take a moment to review our project [contributing guideline](https://github.com/kanisterio/kanister/blob/master/CONTRIBUTING.md) and [Code of Conduct](https://github.com/kanisterio/kanister/blob/master/CODE_OF_CONDUCT.md) document. - name: Update status in project - uses: alex-page/github-project-automation-plus@v0.9.0 + uses: alex-page/github-project-automation-plus@303f24a24c67ce7adf565a07e96720faf126fe36 # v0.9.0 # This only works for PRs opened in the same repo and not by dependabot. # Other PRs don't get the necessary credentials. 
if: github.repository == 'kanisterio/kanister' && !github.event.pull_request.head.repo.fork diff --git a/.golangci.yml b/.golangci.yml index 0c73ba9e1c..9be2882f0d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -43,7 +43,6 @@ issues: - unparam # Tests might have unused function parameters. - dupl - nestif - - gci - text: "`ctx` is unused" # Context might not be in use in places, but for consistency, we pass it. linters: @@ -75,7 +74,6 @@ linters-settings: - default - prefix(github.com/kanisterio/kanister) - blank - - dot skip-generated: true stylecheck: checks: [ "all", "-ST1001", "-ST1005", "-ST1016", "-ST1023", "-ST1000"] diff --git a/CHANGELOG.md b/CHANGELOG.md index 03c9c941ad..120a52f76b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,33 +1,68 @@ # Release Notes +## 0.111.0 + +### New Features + + +* Add support for Read-Only and Write Access Modes when connecting to the Kopia Repository Server in `kando`. + + +* Add support for Cache Size Limits to the `kopia server start` command. + + +* Add support to pass labels and annotations to the methods that create/clone VolumeSnapshot and VolumeSnapshotContent resources. + + +* Support `image` argument for `ExportRDSSnapshotToLocation` and `RestoreRDSSnapshot` functions to override default postgres-kanister-tools image. + + +* Added support to customise the labels and annotations of the temporary pods that are created by some Kanister functions. + + +* Added two new fields, `podLabels` and `podAnnotations`, to the ActionSet. These fields can be used to configure the labels and annotations of the Kanister function pod run by an ActionSet. + +### Security Issues + + +* Update Go to 1.22.7 to pull in latest security updates. + +### Other Notes + + +* Update ubi-minimal base image to ubi-minimal:9.4-1227.1725849298. + + +* Add `stylecheck`, `errcheck`, and `misspel` linters to test packages. + ## 0.110.0 -## New Features +### New Features - + * Split parallelism helm value into dataStore.parallelism.upload and dataStore.parallelism.download to be used separately in BackupDataUsingKopiaServer and RestoreDataUsingKopiaServer -## Bug Fixes +### Bug Fixes - + * Make pod writer exec wait for cat command to finish. Fixes race condition between cat cat command end exec termination. - + * Make sure all storage providers return similar error if snapshot doesn't exist, which is expected by DeleteVolumeSnapshot -## Other Notes +### Other Notes - + * Update ubi-minimal base image to ubi-minimal:9.4-1194 - + * Update errkit to v0.0.2 - + * Switch pkg/app to errkit - + * Switch pkg/kopia to errkit - + * Switch pkg/kube to errkit diff --git a/CHANGELOG_CURRENT.md b/CHANGELOG_CURRENT.md index 03c9c941ad..1ebf3b31bf 100644 --- a/CHANGELOG_CURRENT.md +++ b/CHANGELOG_CURRENT.md @@ -1,33 +1,36 @@ # Release Notes -## 0.110.0 +## 0.111.0 ## New Features - -* Split parallelism helm value into dataStore.parallelism.upload and dataStore.parallelism.download to be used separately in BackupDataUsingKopiaServer and RestoreDataUsingKopiaServer + +* Add support for Read-Only and Write Access Modes when connecting to the Kopia Repository Server in `kando`. -## Bug Fixes + +* Add support for Cache Size Limits to the `kopia server start` command. - -* Make pod writer exec wait for cat command to finish. Fixes race condition between cat cat command end exec termination. + +* Add support to pass labels and annotations to the methods that create/clone VolumeSnapshot and VolumeSnapshotContent resources. 
- -* Make sure all storage providers return similar error if snapshot doesn't exist, which is expected by DeleteVolumeSnapshot + +* Support `image` argument for `ExportRDSSnapshotToLocation` and `RestoreRDSSnapshot` functions to override default postgres-kanister-tools image. -## Other Notes + +* Added support to customise the labels and annotations of the temporary pods that are created by some Kanister functions. + + +* Added two new fields, `podLabels` and `podAnnotations`, to the ActionSet. These fields can be used to configure the labels and annotations of the Kanister function pod run by an ActionSet. - -* Update ubi-minimal base image to ubi-minimal:9.4-1194 +## Security Issues - -* Update errkit to v0.0.2 + +* Update Go to 1.22.7 to pull in latest security updates. - -* Switch pkg/app to errkit +## Other Notes - -* Switch pkg/kopia to errkit + +* Update ubi-minimal base image to ubi-minimal:9.4-1227.1725849298. - -* Switch pkg/kube to errkit + +* Add `stylecheck`, `errcheck`, and `misspell` linters to test packages. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 67f8e6bce7..0643e9a86f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -143,6 +143,13 @@ contributions are high quality and easy for our community to review and accept. Please don't hesitate to reach out to us on [Slack](https://join.slack.com/t/kanisterio/shared_invite/enQtNzg2MDc4NzA0ODY4LTU1NDU2NDZhYjk3YmE5MWNlZWMwYzk1NjNjOGQ3NjAyMjcxMTIyNTE1YzZlMzgwYmIwNWFkNjU0NGFlMzNjNTk). if you have any questions about contributing! +**Filling out the PR template** + +When raising a pull request, it's necessary to fill in the details asked for in the PR template. Some of the +fields might not be applicable to the PR, but filling in proper details for `Change Overview` +and `Test Plan` is a must. These fields set the necessary context for the change made +in the PR, which helps significantly when reviewing it. + ### Commit conventions #### Types: diff --git a/Dockerfile.in b/Dockerfile.in index d16497db83..a5f4822514 100644 --- a/Dockerfile.in +++ b/Dockerfile.in @@ -1,4 +1,4 @@ -ARG base_image=registry.access.redhat.com/ubi9/ubi-minimal:9.4-1227.1725849298 +ARG base_image=registry.access.redhat.com/ubi9/ubi-minimal:9.4-1227.1726694542 FROM ${base_image} ARG kanister_version diff --git a/Makefile b/Makefile index a3a4fc4802..7bdfbb0934 100644 --- a/Makefile +++ b/Makefile @@ -148,7 +148,7 @@ push-name: version: @echo $(VERSION) -.PHONY: deploy test codegen build-dirs run clean container-clean bin-clean docs start-kind tiller stop-kind release-snapshot go-mod-download +.PHONY: deploy format-vet go-test test codegen build-dirs run clean container-clean bin-clean docs start-kind tiller stop-kind release-snapshot go-mod-download deploy: release-controller .deploy-$(DOTFILE_IMAGE) .deploy-$(DOTFILE_IMAGE): @@ -158,9 +158,14 @@ deploy: release-controller .deploy-$(DOTFILE_IMAGE) bundle.yaml.in > .deploy-$(DOTFILE_IMAGE) @kubectl apply -f .deploy-$(DOTFILE_IMAGE) -test: build-dirs +format-vet: build-dirs + @$(MAKE) run CMD="./build/format-vet.sh $(SRC_DIRS)" + +go-test: build-dirs @$(MAKE) run CMD="TEST_FILTER=$(TEST_FILTER) ./build/test.sh $(SRC_DIRS)" +test: format-vet go-test + helm-test: build-dirs @$(MAKE) run CMD="./build/helm-test.sh $(SRC_DIRS)" diff --git a/ROADMAP.md b/ROADMAP.md index b0844bb5c8..a18f15fbbd 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -39,3 +39,4 @@ Please join the Kanister community to give feedback on the roadmap resurrect https://github.com/kanisterio/kanister/issues/193 1.
Merge the Repository controller into the Kanister controller. 1. Support for creation of blueprints/actionsets in application namespaces https://github.com/kanisterio/kanister/discussions/2922 +1. Support iterating over multiple resources with the same function https://github.com/kanisterio/kanister/discussions/3001 diff --git a/build/format-vet.sh b/build/format-vet.sh new file mode 100755 index 0000000000..b782a893e5 --- /dev/null +++ b/build/format-vet.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# Copyright 2024 The Kanister Authors. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset + +export CGO_ENABLED=0 +export GO111MODULE=on + +TARGETS=$(for d in "$@"; do echo ./$d/...; done) + +echo -n "Checking gofmt: " +ERRS=$(find "$@" -type f -name \*.go | xargs gofmt -l 2>&1 || true) +if [ -n "${ERRS}" ]; then + echo "FAIL - the following files need to be gofmt'ed:" + for e in ${ERRS}; do + echo " $e" + done + echo + exit 1 +fi +echo "PASS" +echo + +echo -n "Checking go vet: " +go vet ${TARGETS} +echo "PASS" \ No newline at end of file diff --git a/build/test.sh b/build/test.sh index 58410d46b4..c5d956b41f 100755 --- a/build/test.sh +++ b/build/test.sh @@ -33,31 +33,6 @@ fi TARGETS=$(for d in "$@"; do echo ./$d/...; done) -echo -n "Checking gofmt: " -ERRS=$(find "$@" -type f -name \*.go | xargs gofmt -l 2>&1 || true) -if [ -n "${ERRS}" ]; then - echo "FAIL - the following files need to be gofmt'ed:" - for e in ${ERRS}; do - echo " $e" - done - echo - exit 1 -fi -echo "PASS" -echo - -echo -n "Checking go vet: " -ERRS=$(go vet ${TARGETS} 2>&1 || true) -if [ -n "${ERRS}" ]; then - echo "FAIL" - echo "${ERRS}" - echo - # TODO: Renable govet. Currently generated code fails to pass go vet. report, - # but don't exit on failures. - #exit 1 -fi -echo - check_dependencies() { # Check if minio is already deployed. 
We suppress only `stdout` and not `stderr` to make sure we catch errors if `helm status` fails if helm status minio -n minio 1> /dev/null ; then diff --git a/docker/controller/Dockerfile b/docker/controller/Dockerfile index 95cd881b10..6f6f92028c 100644 --- a/docker/controller/Dockerfile +++ b/docker/controller/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi9/ubi-minimal:9.4-1227.1725849298 +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.4-1227.1726694542 LABEL maintainer="Tom Manville" diff --git a/docker/mongodb-atlas/Dockerfile b/docker/mongodb-atlas/Dockerfile index 395ee830d7..ec4ce8cf0f 100644 --- a/docker/mongodb-atlas/Dockerfile +++ b/docker/mongodb-atlas/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi9/ubi-minimal:9.4-1227.1725849298 +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.4-1227.1726694542 RUN cat >/etc/yum.repos.d/mongodb.repo <:-kanister-operator \ + --namespace= + +Creating a Role with Granular Permissions +========================================= + +If Blueprint doesn't require access to all the resources that are included +in the `edit` ClusterRole, you can create a `Role` in application namespace +with just the specific resources and verbs that Blueprint needs, and a `RoleBinding` +in application namespace to bind the `Role` to Kanister's Service Account. +This approach enhances security by granting only the necessary permissions. + +1. Create a `Role` with the permissions required by the Blueprint: + +.. code-block:: yaml + + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: kanister-role + namespace: + rules: + - apiGroups: [""] + resources: ["pods", "pods/log", "persistentvolumeclaims" ,"secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["apps"] + resources: ["deployments", "statefulsets"] + verbs: ["get", "list", "watch"] + +2. Create a `RoleBinding` to bind the `Role` to Kanister's Service Account: + +.. code-block:: bash + + kubectl create rolebinding kanister-role-binding --role=kanister-role \ + --serviceaccount=:-kanister-operator \ + --namespace= + +After setting up the required `Role`/`RoleBinding`, Kanister will be able +to successfully perform snapshot and restore operations in the application's +namespace. diff --git a/docs/tutorial.rst b/docs/tutorial.rst index 11649ca12f..762bbdc7fb 100644 --- a/docs/tutorial.rst +++ b/docs/tutorial.rst @@ -25,6 +25,8 @@ Prerequisites * A running Kanister controller. See :ref:`install` +* Proper RBAC configured for the Kanister controller. See :ref:`rbac` + * Access to an S3 bucket and credentials. Example Application diff --git a/docs_new/.vitepress/config.mts b/docs_new/.vitepress/config.mts index 1a9635f73e..df2a309e60 100644 --- a/docs_new/.vitepress/config.mts +++ b/docs_new/.vitepress/config.mts @@ -23,6 +23,7 @@ export default defineConfig({ sidebar: [ { text: "Overview", link: "/overview" }, { text: "Installation", link: "/install" }, + { text: "RBAC Configuration", link: "/rbac" }, { text: "Tutorial", link: "/tutorial" }, { text: "Architecture", link: "/architecture" }, { text: "Tooling", link: "/tooling" }, diff --git a/docs_new/architecture.md b/docs_new/architecture.md index 7eef23ca5f..4c07da54ed 100644 --- a/docs_new/architecture.md +++ b/docs_new/architecture.md @@ -159,8 +159,7 @@ type ActionSpec struct { - `ConfigMaps` and `Secrets`, similar to `Artifacts`, are a mappings of names specified in the Blueprint referencing the Kubernetes object to be used. 
-- `Profile` is a reference to a `Profile`{.interpreted-text - role="ref"} Kubernetes CustomResource that will be made available to +- `Profile` is a reference to a [Profile](#profiles) Kubernetes CustomResource that will be made available to the Blueprint. - `Options` is used to specify additional values to be used in the Blueprint @@ -369,8 +368,8 @@ in which it is deployed. When it sees an ActionSet with a nil status field, it immediately initializes the ActionSet\'s status to the Pending State. The status is also prepopulated with the pending phases. -Execution begins by resolving all the `templates`{.interpreted-text -role="ref"}. If any required object references or artifacts are missing +Execution begins by resolving all the [Templates](templates.md). +If any required object references or artifacts are missing from the ActionSet, the ActionSet status is marked as failed. Otherwise, the template params are used to render the output Artifacts, and then the args in the Blueprint. diff --git a/docs_new/functions.md b/docs_new/functions.md index 07016e3b66..b3f1adee99 100644 --- a/docs_new/functions.md +++ b/docs_new/functions.md @@ -424,21 +424,32 @@ this phase, we will use the `backupInfo` Artifact provided by backup function. ``` yaml -- func: ScaleWorkload name: ShutdownApplication args: namespace: \"{{ - .Deployment.Namespace }}\" name: \"{{ .Deployment.Name }}\" kind: - Deployment replicas: 0 -- func: RestoreData name: RestoreFromObjectStore args: namespace: \"{{ - .Deployment.Namespace }}\" pod: \"{{ index .Deployment.Pods 0 }}\" - image: ghcr.io/kanisterio/kanister-tools: backupArtifactPrefix: - s3-bucket/path/artifactPrefix backupTag: \"{{ - .ArtifactsIn.backupInfo.KeyValue.backupIdentifier }}\" +- func: ScaleWorkload + name: ShutdownApplication + args: + namespace: \"{{.Deployment.Namespace }}\" + name: \"{{ .Deployment.Name }}\" + kind: Deployment + replicas: 0 +- func: RestoreData + name: RestoreFromObjectStore + args: + namespace: \"{{.Deployment.Namespace }}\" + pod: \"{{ index .Deployment.Pods 0 }}\" + image: ghcr.io/kanisterio/kanister-tools:0.110.0 + backupArtifactPrefix: s3-bucket/path/artifactPrefix + backupTag: \"{{.ArtifactsIn.backupInfo.KeyValue.backupIdentifier }}\" podAnnotations: annKey: annValue podLabels: labelKey: labelValue -- func: ScaleWorkload name: StartupApplication args: namespace: \"{{ - .Deployment.Namespace }}\" name: \"{{ .Deployment.Name }}\" kind: - Deployment replicas: 1 +- func: ScaleWorkload + name: StartupApplication + args: + namespace: \"{{.Deployment.Namespace }}\" + name: \"{{ .Deployment.Name }}\" + kind: Deployment + replicas: 1 ``` ### RestoreDataAll @@ -491,30 +502,38 @@ on all pods concurrently. For this phase, we will use the `params` Artifact provided by BackupDataAll function. 
``` yaml - -- func: ScaleWorkload name: ShutdownApplication args: namespace: \"{{ - .Deployment.Namespace }}\" name: \"{{ .Deployment.Name }}\" kind: - Deployment replicas: 0 -- func: RestoreDataAll name: RestoreFromObjectStore args: namespace: - \"{{ .Deployment.Namespace }}\" image: - ghcr.io/kanisterio/kanister-tools: backupArtifactPrefix: - s3-bucket/path/artifactPrefix backupInfo: \"{{ - .ArtifactsIn.params.KeyValue.backupInfo }}\" +- func: ScaleWorkload + name: ShutdownApplication + args: + namespace: "{{ .Deployment.Namespace }}" + name: "{{ .Deployment.Name }}" + kind: Deployment + replicas: 0 +- func: RestoreDataAll + name: RestoreFromObjectStore + args: + namespace: "{{ .Deployment.Namespace }}" + image: ghcr.io/kanisterio/kanister-tools:0.110.0 + backupArtifactPrefix: s3-bucket/path/artifactPrefix + backupInfo: "{{ .ArtifactsIn.params.KeyValue.backupInfo }}" podAnnotations: annKey: annValue podLabels: labelKey: labelValue -- func: ScaleWorkload name: StartupApplication args: namespace: \"{{ - .Deployment.Namespace }}\" name: \"{{ .Deployment.Name }}\" kind: - Deployment replicas: 2 +- func: ScaleWorkload + name: StartupApplication + args: + namespace: "{{ .Deployment.Namespace }}" + name: "{{ .Deployment.Name }}" + kind: Deployment + replicas: 2 ``` ### CopyVolumeData This function copies data from the specified volume (referenced by a Kubernetes PersistentVolumeClaim) into an object store. This data can be -restored into a volume using the `restoredata`{.interpreted-text -role="ref"} function +restored into a volume using the [RestoreData](#restoredata) function ::: tip NOTE @@ -725,9 +744,8 @@ Arguments: ### CreateVolumeFromSnapshot This function is used to restore one or more PVCs of an application from -the snapshots taken using the `createvolumesnapshot`{.interpreted-text -role="ref"} function. It deletes old PVCs, if present and creates new -PVCs from the snapshots taken earlier. +the snapshots taken using the [CreateVolumeSnapshot](#createvolumesnapshot) function. +It deletes old PVCs, if present and creates new PVCs from the snapshots taken earlier. Arguments: @@ -1546,21 +1564,31 @@ provided by backup function. 
``` yaml -- func: ScaleWorkload name: shutdownPod args: namespace: \"{{ - .Deployment.Namespace }}\" name: \"{{ .Deployment.Name }}\" kind: - Deployment replicas: 0 -- func: RestoreDataUsingKopiaServer name: restoreFromS3 args: - namespace: \"{{ .Deployment.Namespace }}\" pod: \"{{ index - .Deployment.Pods 0 }}\" backupIdentifier: \"{{ - .ArtifactsIn.backupIdentifier.KeyValue.id }}\" restorePath: - /mnt/data +- func: ScaleWorkload + name: shutdownPod + args: + namespace: \"{{.Deployment.Namespace }}\" + name: \"{{ .Deployment.Name }}\" + kind: Deployment + replicas: 0 +- func: RestoreDataUsingKopiaServer + name: restoreFromS3 + args: + namespace: \"{{ .Deployment.Namespace }}\" + pod: \"{{ index .Deployment.Pods 0 }}\" + backupIdentifier: \"{{.ArtifactsIn.backupIdentifier.KeyValue.id }}\" + restorePath: /mnt/data podAnnotations: annKey: annValue podLabels: labelKey: labelValue -- func: ScaleWorkload name: bringupPod args: namespace: \"{{ - .Deployment.Namespace }}\" name: \"{{ .Deployment.Name }}\" kind: - Deployment replicas: 1 +- func: ScaleWorkload + name: bringupPod + args: + namespace: \"{{.Deployment.Namespace }}\" + name: \"{{ .Deployment.Name }}\" + kind: Deployment + replicas: 1 ``` ### DeleteDataUsingKopiaServer diff --git a/docs_new/install.md b/docs_new/install.md index af8b4d7d25..209591e315 100644 --- a/docs_new/install.md +++ b/docs_new/install.md @@ -40,6 +40,15 @@ is provided on a best-effort basis. If you are using an older version of Kubernetes, please consider upgrading to a newer version. ::: +::: tip NOTE + +To improve the cluster's security, the default installation of Kanister is restricted +to access only the resources within its own namespace. As a result, Kanister may not be +able to snapshot or restore applications by default in other namespaces. +If a Blueprint needs access to resources in other namespaces, please follow the steps +provided [here](rbac) to configure the access correctly.
+::: + ## Configuring Kanister Use the `helm show values` command to list the configurable options: @@ -89,7 +98,7 @@ kubectl create secret tls my-tls-secret \--cert /path/to/tls.crt \--key ``` Install Kanister, providing the PEM-encoded CA bundle and the -[tls]{.title-ref} secret name like below: +`tls` secret name like below: ``` bash helm upgrade \--install kanister kanister/kanister-operator \--namespace diff --git a/docs_new/pnpm-lock.yaml b/docs_new/pnpm-lock.yaml index ff28d7c69b..3b5b0e68fa 100644 --- a/docs_new/pnpm-lock.yaml +++ b/docs_new/pnpm-lock.yaml @@ -1,198 +1,108 @@ -lockfileVersion: '6.0' +lockfileVersion: '9.0' settings: autoInstallPeers: true excludeLinksFromLockfile: false -devDependencies: - vitepress: - specifier: 1.0.0-rc.40 - version: 1.0.0-rc.40(@algolia/client-search@4.22.1)(search-insights@2.13.0) - vue: - specifier: ^3.4.15 - version: 3.4.15 +importers: + + .: + devDependencies: + vitepress: + specifier: 1.0.0-rc.40 + version: 1.0.0-rc.40(@algolia/client-search@4.22.1)(postcss@8.4.47)(search-insights@2.13.0) + vue: + specifier: ^3.4.15 + version: 3.4.15 packages: - /@algolia/autocomplete-core@1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1)(search-insights@2.13.0): + '@algolia/autocomplete-core@1.9.3': resolution: {integrity: sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==} - dependencies: - '@algolia/autocomplete-plugin-algolia-insights': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1)(search-insights@2.13.0) - '@algolia/autocomplete-shared': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1) - transitivePeerDependencies: - - '@algolia/client-search' - - algoliasearch - - search-insights - dev: true - /@algolia/autocomplete-plugin-algolia-insights@1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1)(search-insights@2.13.0): + '@algolia/autocomplete-plugin-algolia-insights@1.9.3': resolution: {integrity: sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==} peerDependencies: search-insights: '>= 1 < 3' - dependencies: - '@algolia/autocomplete-shared': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1) - search-insights: 2.13.0 - transitivePeerDependencies: - - '@algolia/client-search' - - algoliasearch - dev: true - /@algolia/autocomplete-preset-algolia@1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1): + '@algolia/autocomplete-preset-algolia@1.9.3': resolution: {integrity: sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==} peerDependencies: '@algolia/client-search': '>= 4.9.1 < 6' algoliasearch: '>= 4.9.1 < 6' - dependencies: - '@algolia/autocomplete-shared': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1) - '@algolia/client-search': 4.22.1 - algoliasearch: 4.22.1 - dev: true - /@algolia/autocomplete-shared@1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1): + '@algolia/autocomplete-shared@1.9.3': resolution: {integrity: sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==} peerDependencies: '@algolia/client-search': '>= 4.9.1 < 6' algoliasearch: '>= 4.9.1 < 6' - dependencies: - '@algolia/client-search': 4.22.1 - algoliasearch: 4.22.1 - dev: true - /@algolia/cache-browser-local-storage@4.22.1: + '@algolia/cache-browser-local-storage@4.22.1': resolution: {integrity: sha512-Sw6IAmOCvvP6QNgY9j+Hv09mvkvEIDKjYW8ow0UDDAxSXy664RBNQk3i/0nt7gvceOJ6jGmOTimaZoY1THmU7g==} - dependencies: - '@algolia/cache-common': 4.22.1 - 
dev: true - /@algolia/cache-common@4.22.1: + '@algolia/cache-common@4.22.1': resolution: {integrity: sha512-TJMBKqZNKYB9TptRRjSUtevJeQVXRmg6rk9qgFKWvOy8jhCPdyNZV1nB3SKGufzvTVbomAukFR8guu/8NRKBTA==} - dev: true - /@algolia/cache-in-memory@4.22.1: + '@algolia/cache-in-memory@4.22.1': resolution: {integrity: sha512-ve+6Ac2LhwpufuWavM/aHjLoNz/Z/sYSgNIXsinGofWOysPilQZPUetqLj8vbvi+DHZZaYSEP9H5SRVXnpsNNw==} - dependencies: - '@algolia/cache-common': 4.22.1 - dev: true - /@algolia/client-account@4.22.1: + '@algolia/client-account@4.22.1': resolution: {integrity: sha512-k8m+oegM2zlns/TwZyi4YgCtyToackkOpE+xCaKCYfBfDtdGOaVZCM5YvGPtK+HGaJMIN/DoTL8asbM3NzHonw==} - dependencies: - '@algolia/client-common': 4.22.1 - '@algolia/client-search': 4.22.1 - '@algolia/transporter': 4.22.1 - dev: true - /@algolia/client-analytics@4.22.1: + '@algolia/client-analytics@4.22.1': resolution: {integrity: sha512-1ssi9pyxyQNN4a7Ji9R50nSdISIumMFDwKNuwZipB6TkauJ8J7ha/uO60sPJFqQyqvvI+px7RSNRQT3Zrvzieg==} - dependencies: - '@algolia/client-common': 4.22.1 - '@algolia/client-search': 4.22.1 - '@algolia/requester-common': 4.22.1 - '@algolia/transporter': 4.22.1 - dev: true - /@algolia/client-common@4.22.1: + '@algolia/client-common@4.22.1': resolution: {integrity: sha512-IvaL5v9mZtm4k4QHbBGDmU3wa/mKokmqNBqPj0K7lcR8ZDKzUorhcGp/u8PkPC/e0zoHSTvRh7TRkGX3Lm7iOQ==} - dependencies: - '@algolia/requester-common': 4.22.1 - '@algolia/transporter': 4.22.1 - dev: true - /@algolia/client-personalization@4.22.1: + '@algolia/client-personalization@4.22.1': resolution: {integrity: sha512-sl+/klQJ93+4yaqZ7ezOttMQ/nczly/3GmgZXJ1xmoewP5jmdP/X/nV5U7EHHH3hCUEHeN7X1nsIhGPVt9E1cQ==} - dependencies: - '@algolia/client-common': 4.22.1 - '@algolia/requester-common': 4.22.1 - '@algolia/transporter': 4.22.1 - dev: true - /@algolia/client-search@4.22.1: + '@algolia/client-search@4.22.1': resolution: {integrity: sha512-yb05NA4tNaOgx3+rOxAmFztgMTtGBi97X7PC3jyNeGiwkAjOZc2QrdZBYyIdcDLoI09N0gjtpClcackoTN0gPA==} - dependencies: - '@algolia/client-common': 4.22.1 - '@algolia/requester-common': 4.22.1 - '@algolia/transporter': 4.22.1 - dev: true - /@algolia/logger-common@4.22.1: + '@algolia/logger-common@4.22.1': resolution: {integrity: sha512-OnTFymd2odHSO39r4DSWRFETkBufnY2iGUZNrMXpIhF5cmFE8pGoINNPzwg02QLBlGSaLqdKy0bM8S0GyqPLBg==} - dev: true - /@algolia/logger-console@4.22.1: + '@algolia/logger-console@4.22.1': resolution: {integrity: sha512-O99rcqpVPKN1RlpgD6H3khUWylU24OXlzkavUAMy6QZd1776QAcauE3oP8CmD43nbaTjBexZj2nGsBH9Tc0FVA==} - dependencies: - '@algolia/logger-common': 4.22.1 - dev: true - /@algolia/requester-browser-xhr@4.22.1: + '@algolia/requester-browser-xhr@4.22.1': resolution: {integrity: sha512-dtQGYIg6MteqT1Uay3J/0NDqD+UciHy3QgRbk7bNddOJu+p3hzjTRYESqEnoX/DpEkaNYdRHUKNylsqMpgwaEw==} - dependencies: - '@algolia/requester-common': 4.22.1 - dev: true - /@algolia/requester-common@4.22.1: + '@algolia/requester-common@4.22.1': resolution: {integrity: sha512-dgvhSAtg2MJnR+BxrIFqlLtkLlVVhas9HgYKMk2Uxiy5m6/8HZBL40JVAMb2LovoPFs9I/EWIoFVjOrFwzn5Qg==} - dev: true - /@algolia/requester-node-http@4.22.1: + '@algolia/requester-node-http@4.22.1': resolution: {integrity: sha512-JfmZ3MVFQkAU+zug8H3s8rZ6h0ahHZL/SpMaSasTCGYR5EEJsCc8SI5UZ6raPN2tjxa5bxS13BRpGSBUens7EA==} - dependencies: - '@algolia/requester-common': 4.22.1 - dev: true - /@algolia/transporter@4.22.1: + '@algolia/transporter@4.22.1': resolution: {integrity: sha512-kzWgc2c9IdxMa3YqA6TN0NW5VrKYYW/BELIn7vnLyn+U/RFdZ4lxxt9/8yq3DKV5snvoDzzO4ClyejZRdV3lMQ==} - dependencies: - '@algolia/cache-common': 4.22.1 - 
'@algolia/logger-common': 4.22.1 - '@algolia/requester-common': 4.22.1 - dev: true - /@babel/helper-string-parser@7.23.4: + '@babel/helper-string-parser@7.23.4': resolution: {integrity: sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==} engines: {node: '>=6.9.0'} - dev: true - /@babel/helper-validator-identifier@7.22.20: + '@babel/helper-validator-identifier@7.22.20': resolution: {integrity: sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==} engines: {node: '>=6.9.0'} - dev: true - /@babel/parser@7.23.9: + '@babel/parser@7.23.9': resolution: {integrity: sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==} engines: {node: '>=6.0.0'} hasBin: true - dependencies: - '@babel/types': 7.23.9 - dev: true - /@babel/types@7.23.9: + '@babel/types@7.23.9': resolution: {integrity: sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==} engines: {node: '>=6.9.0'} - dependencies: - '@babel/helper-string-parser': 7.23.4 - '@babel/helper-validator-identifier': 7.22.20 - to-fast-properties: 2.0.0 - dev: true - /@docsearch/css@3.5.2: + '@docsearch/css@3.5.2': resolution: {integrity: sha512-SPiDHaWKQZpwR2siD0KQUwlStvIAnEyK6tAE2h2Wuoq8ue9skzhlyVQ1ddzOxX6khULnAALDiR/isSF3bnuciA==} - dev: true - /@docsearch/js@3.5.2(@algolia/client-search@4.22.1)(search-insights@2.13.0): + '@docsearch/js@3.5.2': resolution: {integrity: sha512-p1YFTCDflk8ieHgFJYfmyHBki1D61+U9idwrLh+GQQMrBSP3DLGKpy0XUJtPjAOPltcVbqsTjiPFfH7JImjUNg==} - dependencies: - '@docsearch/react': 3.5.2(@algolia/client-search@4.22.1)(search-insights@2.13.0) - preact: 10.19.3 - transitivePeerDependencies: - - '@algolia/client-search' - - '@types/react' - - react - - react-dom - - search-insights - dev: true - /@docsearch/react@3.5.2(@algolia/client-search@4.22.1)(search-insights@2.13.0): + '@docsearch/react@3.5.2': resolution: {integrity: sha512-9Ahcrs5z2jq/DcAvYtvlqEBHImbm4YJI8M9y0x6Tqg598P40HTEkX7hsMcIuThI+hTFxRGZ9hll0Wygm2yEjng==} peerDependencies: '@types/react': '>= 16.8.0 < 19.0.0' @@ -208,471 +118,286 @@ packages: optional: true search-insights: optional: true - dependencies: - '@algolia/autocomplete-core': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1)(search-insights@2.13.0) - '@algolia/autocomplete-preset-algolia': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1) - '@docsearch/css': 3.5.2 - algoliasearch: 4.22.1 - search-insights: 2.13.0 - transitivePeerDependencies: - - '@algolia/client-search' - dev: true - /@esbuild/aix-ppc64@0.20.2: - resolution: {integrity: sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==} + '@esbuild/aix-ppc64@0.21.5': + resolution: {integrity: sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==} engines: {node: '>=12'} cpu: [ppc64] os: [aix] - requiresBuild: true - dev: true - optional: true - /@esbuild/android-arm64@0.20.2: - resolution: {integrity: sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==} + '@esbuild/android-arm64@0.21.5': + resolution: {integrity: sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==} engines: {node: '>=12'} cpu: [arm64] os: [android] - requiresBuild: true - dev: true - optional: true - /@esbuild/android-arm@0.20.2: - resolution: {integrity: 
sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==} + '@esbuild/android-arm@0.21.5': + resolution: {integrity: sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==} engines: {node: '>=12'} cpu: [arm] os: [android] - requiresBuild: true - dev: true - optional: true - /@esbuild/android-x64@0.20.2: - resolution: {integrity: sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==} + '@esbuild/android-x64@0.21.5': + resolution: {integrity: sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==} engines: {node: '>=12'} cpu: [x64] os: [android] - requiresBuild: true - dev: true - optional: true - /@esbuild/darwin-arm64@0.20.2: - resolution: {integrity: sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==} + '@esbuild/darwin-arm64@0.21.5': + resolution: {integrity: sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==} engines: {node: '>=12'} cpu: [arm64] os: [darwin] - requiresBuild: true - dev: true - optional: true - /@esbuild/darwin-x64@0.20.2: - resolution: {integrity: sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==} + '@esbuild/darwin-x64@0.21.5': + resolution: {integrity: sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==} engines: {node: '>=12'} cpu: [x64] os: [darwin] - requiresBuild: true - dev: true - optional: true - /@esbuild/freebsd-arm64@0.20.2: - resolution: {integrity: sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==} + '@esbuild/freebsd-arm64@0.21.5': + resolution: {integrity: sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==} engines: {node: '>=12'} cpu: [arm64] os: [freebsd] - requiresBuild: true - dev: true - optional: true - /@esbuild/freebsd-x64@0.20.2: - resolution: {integrity: sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==} + '@esbuild/freebsd-x64@0.21.5': + resolution: {integrity: sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==} engines: {node: '>=12'} cpu: [x64] os: [freebsd] - requiresBuild: true - dev: true - optional: true - /@esbuild/linux-arm64@0.20.2: - resolution: {integrity: sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==} + '@esbuild/linux-arm64@0.21.5': + resolution: {integrity: sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==} engines: {node: '>=12'} cpu: [arm64] os: [linux] - requiresBuild: true - dev: true - optional: true - /@esbuild/linux-arm@0.20.2: - resolution: {integrity: sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==} + '@esbuild/linux-arm@0.21.5': + resolution: {integrity: sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==} engines: {node: '>=12'} cpu: [arm] os: [linux] - requiresBuild: true - dev: true - optional: true - /@esbuild/linux-ia32@0.20.2: - resolution: {integrity: sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==} + '@esbuild/linux-ia32@0.21.5': + resolution: {integrity: sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==} engines: {node: 
'>=12'} cpu: [ia32] os: [linux] - requiresBuild: true - dev: true - optional: true - /@esbuild/linux-loong64@0.20.2: - resolution: {integrity: sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==} + '@esbuild/linux-loong64@0.21.5': + resolution: {integrity: sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==} engines: {node: '>=12'} cpu: [loong64] os: [linux] - requiresBuild: true - dev: true - optional: true - /@esbuild/linux-mips64el@0.20.2: - resolution: {integrity: sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==} + '@esbuild/linux-mips64el@0.21.5': + resolution: {integrity: sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==} engines: {node: '>=12'} cpu: [mips64el] os: [linux] - requiresBuild: true - dev: true - optional: true - /@esbuild/linux-ppc64@0.20.2: - resolution: {integrity: sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==} + '@esbuild/linux-ppc64@0.21.5': + resolution: {integrity: sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==} engines: {node: '>=12'} cpu: [ppc64] os: [linux] - requiresBuild: true - dev: true - optional: true - /@esbuild/linux-riscv64@0.20.2: - resolution: {integrity: sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==} + '@esbuild/linux-riscv64@0.21.5': + resolution: {integrity: sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==} engines: {node: '>=12'} cpu: [riscv64] os: [linux] - requiresBuild: true - dev: true - optional: true - /@esbuild/linux-s390x@0.20.2: - resolution: {integrity: sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==} + '@esbuild/linux-s390x@0.21.5': + resolution: {integrity: sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==} engines: {node: '>=12'} cpu: [s390x] os: [linux] - requiresBuild: true - dev: true - optional: true - /@esbuild/linux-x64@0.20.2: - resolution: {integrity: sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==} + '@esbuild/linux-x64@0.21.5': + resolution: {integrity: sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==} engines: {node: '>=12'} cpu: [x64] os: [linux] - requiresBuild: true - dev: true - optional: true - /@esbuild/netbsd-x64@0.20.2: - resolution: {integrity: sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==} + '@esbuild/netbsd-x64@0.21.5': + resolution: {integrity: sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==} engines: {node: '>=12'} cpu: [x64] os: [netbsd] - requiresBuild: true - dev: true - optional: true - /@esbuild/openbsd-x64@0.20.2: - resolution: {integrity: sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==} + '@esbuild/openbsd-x64@0.21.5': + resolution: {integrity: sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==} engines: {node: '>=12'} cpu: [x64] os: [openbsd] - requiresBuild: true - dev: true - optional: true - /@esbuild/sunos-x64@0.20.2: - resolution: {integrity: sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==} + 
'@esbuild/sunos-x64@0.21.5': + resolution: {integrity: sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==} engines: {node: '>=12'} cpu: [x64] os: [sunos] - requiresBuild: true - dev: true - optional: true - /@esbuild/win32-arm64@0.20.2: - resolution: {integrity: sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==} + '@esbuild/win32-arm64@0.21.5': + resolution: {integrity: sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==} engines: {node: '>=12'} cpu: [arm64] os: [win32] - requiresBuild: true - dev: true - optional: true - /@esbuild/win32-ia32@0.20.2: - resolution: {integrity: sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==} + '@esbuild/win32-ia32@0.21.5': + resolution: {integrity: sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==} engines: {node: '>=12'} cpu: [ia32] os: [win32] - requiresBuild: true - dev: true - optional: true - /@esbuild/win32-x64@0.20.2: - resolution: {integrity: sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==} + '@esbuild/win32-x64@0.21.5': + resolution: {integrity: sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==} engines: {node: '>=12'} cpu: [x64] os: [win32] - requiresBuild: true - dev: true - optional: true - /@jridgewell/sourcemap-codec@1.4.15: + '@jridgewell/sourcemap-codec@1.4.15': resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} - dev: true - /@rollup/rollup-android-arm-eabi@4.14.0: - resolution: {integrity: sha512-jwXtxYbRt1V+CdQSy6Z+uZti7JF5irRKF8hlKfEnF/xJpcNGuuiZMBvuoYM+x9sr9iWGnzrlM0+9hvQ1kgkf1w==} + '@rollup/rollup-android-arm-eabi@4.22.4': + resolution: {integrity: sha512-Fxamp4aEZnfPOcGA8KSNEohV8hX7zVHOemC8jVBoBUHu5zpJK/Eu3uJwt6BMgy9fkvzxDaurgj96F/NiLukF2w==} cpu: [arm] os: [android] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-android-arm64@4.14.0: - resolution: {integrity: sha512-fI9nduZhCccjzlsA/OuAwtFGWocxA4gqXGTLvOyiF8d+8o0fZUeSztixkYjcGq1fGZY3Tkq4yRvHPFxU+jdZ9Q==} + '@rollup/rollup-android-arm64@4.22.4': + resolution: {integrity: sha512-VXoK5UMrgECLYaMuGuVTOx5kcuap1Jm8g/M83RnCHBKOqvPPmROFJGQaZhGccnsFtfXQ3XYa4/jMCJvZnbJBdA==} cpu: [arm64] os: [android] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-darwin-arm64@4.14.0: - resolution: {integrity: sha512-BcnSPRM76/cD2gQC+rQNGBN6GStBs2pl/FpweW8JYuz5J/IEa0Fr4AtrPv766DB/6b2MZ/AfSIOSGw3nEIP8SA==} + '@rollup/rollup-darwin-arm64@4.22.4': + resolution: {integrity: sha512-xMM9ORBqu81jyMKCDP+SZDhnX2QEVQzTcC6G18KlTQEzWK8r/oNZtKuZaCcHhnsa6fEeOBionoyl5JsAbE/36Q==} cpu: [arm64] os: [darwin] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-darwin-x64@4.14.0: - resolution: {integrity: sha512-LDyFB9GRolGN7XI6955aFeI3wCdCUszFWumWU0deHA8VpR3nWRrjG6GtGjBrQxQKFevnUTHKCfPR4IvrW3kCgQ==} + '@rollup/rollup-darwin-x64@4.22.4': + resolution: {integrity: sha512-aJJyYKQwbHuhTUrjWjxEvGnNNBCnmpHDvrb8JFDbeSH3m2XdHcxDd3jthAzvmoI8w/kSjd2y0udT+4okADsZIw==} cpu: [x64] os: [darwin] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-linux-arm-gnueabihf@4.14.0: - resolution: {integrity: sha512-ygrGVhQP47mRh0AAD0zl6QqCbNsf0eTo+vgwkY6LunBcg0f2Jv365GXlDUECIyoXp1kKwL5WW6rsO429DBY/bA==} + '@rollup/rollup-linux-arm-gnueabihf@4.22.4': + resolution: {integrity: 
sha512-j63YtCIRAzbO+gC2L9dWXRh5BFetsv0j0va0Wi9epXDgU/XUi5dJKo4USTttVyK7fGw2nPWK0PbAvyliz50SCQ==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm-musleabihf@4.22.4': + resolution: {integrity: sha512-dJnWUgwWBX1YBRsuKKMOlXCzh2Wu1mlHzv20TpqEsfdZLb3WoJW2kIEsGwLkroYf24IrPAvOT/ZQ2OYMV6vlrg==} cpu: [arm] os: [linux] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-linux-arm64-gnu@4.14.0: - resolution: {integrity: sha512-x+uJ6MAYRlHGe9wi4HQjxpaKHPM3d3JjqqCkeC5gpnnI6OWovLdXTpfa8trjxPLnWKyBsSi5kne+146GAxFt4A==} + '@rollup/rollup-linux-arm64-gnu@4.22.4': + resolution: {integrity: sha512-AdPRoNi3NKVLolCN/Sp4F4N1d98c4SBnHMKoLuiG6RXgoZ4sllseuGioszumnPGmPM2O7qaAX/IJdeDU8f26Aw==} cpu: [arm64] os: [linux] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-linux-arm64-musl@4.14.0: - resolution: {integrity: sha512-nrRw8ZTQKg6+Lttwqo6a2VxR9tOroa2m91XbdQ2sUUzHoedXlsyvY1fN4xWdqz8PKmf4orDwejxXHjh7YBGUCA==} + '@rollup/rollup-linux-arm64-musl@4.22.4': + resolution: {integrity: sha512-Gl0AxBtDg8uoAn5CCqQDMqAx22Wx22pjDOjBdmG0VIWX3qUBHzYmOKh8KXHL4UpogfJ14G4wk16EQogF+v8hmA==} cpu: [arm64] os: [linux] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-linux-powerpc64le-gnu@4.14.0: - resolution: {integrity: sha512-xV0d5jDb4aFu84XKr+lcUJ9y3qpIWhttO3Qev97z8DKLXR62LC3cXT/bMZXrjLF9X+P5oSmJTzAhqwUbY96PnA==} - cpu: [ppc64le] + '@rollup/rollup-linux-powerpc64le-gnu@4.22.4': + resolution: {integrity: sha512-3aVCK9xfWW1oGQpTsYJJPF6bfpWfhbRnhdlyhak2ZiyFLDaayz0EP5j9V1RVLAAxlmWKTDfS9wyRyY3hvhPoOg==} + cpu: [ppc64] os: [linux] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-linux-riscv64-gnu@4.14.0: - resolution: {integrity: sha512-SDDhBQwZX6LPRoPYjAZWyL27LbcBo7WdBFWJi5PI9RPCzU8ijzkQn7tt8NXiXRiFMJCVpkuMkBf4OxSxVMizAw==} + '@rollup/rollup-linux-riscv64-gnu@4.22.4': + resolution: {integrity: sha512-ePYIir6VYnhgv2C5Xe9u+ico4t8sZWXschR6fMgoPUK31yQu7hTEJb7bCqivHECwIClJfKgE7zYsh1qTP3WHUA==} cpu: [riscv64] os: [linux] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-linux-s390x-gnu@4.14.0: - resolution: {integrity: sha512-RxB/qez8zIDshNJDufYlTT0ZTVut5eCpAZ3bdXDU9yTxBzui3KhbGjROK2OYTTor7alM7XBhssgoO3CZ0XD3qA==} + '@rollup/rollup-linux-s390x-gnu@4.22.4': + resolution: {integrity: sha512-GqFJ9wLlbB9daxhVlrTe61vJtEY99/xB3C8e4ULVsVfflcpmR6c8UZXjtkMA6FhNONhj2eA5Tk9uAVw5orEs4Q==} cpu: [s390x] os: [linux] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-linux-x64-gnu@4.14.0: - resolution: {integrity: sha512-C6y6z2eCNCfhZxT9u+jAM2Fup89ZjiG5pIzZIDycs1IwESviLxwkQcFRGLjnDrP+PT+v5i4YFvlcfAs+LnreXg==} + '@rollup/rollup-linux-x64-gnu@4.22.4': + resolution: {integrity: sha512-87v0ol2sH9GE3cLQLNEy0K/R0pz1nvg76o8M5nhMR0+Q+BBGLnb35P0fVz4CQxHYXaAOhE8HhlkaZfsdUOlHwg==} cpu: [x64] os: [linux] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-linux-x64-musl@4.14.0: - resolution: {integrity: sha512-i0QwbHYfnOMYsBEyjxcwGu5SMIi9sImDVjDg087hpzXqhBSosxkE7gyIYFHgfFl4mr7RrXksIBZ4DoLoP4FhJg==} + '@rollup/rollup-linux-x64-musl@4.22.4': + resolution: {integrity: sha512-UV6FZMUgePDZrFjrNGIWzDo/vABebuXBhJEqrHxrGiU6HikPy0Z3LfdtciIttEUQfuDdCn8fqh7wiFJjCNwO+g==} cpu: [x64] os: [linux] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-win32-arm64-msvc@4.14.0: - resolution: {integrity: sha512-Fq52EYb0riNHLBTAcL0cun+rRwyZ10S9vKzhGKKgeD+XbwunszSY0rVMco5KbOsTlwovP2rTOkiII/fQ4ih/zQ==} + '@rollup/rollup-win32-arm64-msvc@4.22.4': + resolution: {integrity: 
sha512-BjI+NVVEGAXjGWYHz/vv0pBqfGoUH0IGZ0cICTn7kB9PyjrATSkX+8WkguNjWoj2qSr1im/+tTGRaY+4/PdcQw==} cpu: [arm64] os: [win32] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-win32-ia32-msvc@4.14.0: - resolution: {integrity: sha512-e/PBHxPdJ00O9p5Ui43+vixSgVf4NlLsmV6QneGERJ3lnjIua/kim6PRFe3iDueT1rQcgSkYP8ZBBXa/h4iPvw==} + '@rollup/rollup-win32-ia32-msvc@4.22.4': + resolution: {integrity: sha512-SiWG/1TuUdPvYmzmYnmd3IEifzR61Tragkbx9D3+R8mzQqDBz8v+BvZNDlkiTtI9T15KYZhP0ehn3Dld4n9J5g==} cpu: [ia32] os: [win32] - requiresBuild: true - dev: true - optional: true - /@rollup/rollup-win32-x64-msvc@4.14.0: - resolution: {integrity: sha512-aGg7iToJjdklmxlUlJh/PaPNa4PmqHfyRMLunbL3eaMO0gp656+q1zOKkpJ/CVe9CryJv6tAN1HDoR8cNGzkag==} + '@rollup/rollup-win32-x64-msvc@4.22.4': + resolution: {integrity: sha512-j8pPKp53/lq9lMXN57S8cFz0MynJk8OWNuUnXct/9KCpKU7DgU3bYMJhwWmcqC0UU29p8Lr0/7KEVcaM6bf47Q==} cpu: [x64] os: [win32] - requiresBuild: true - dev: true - optional: true - /@types/estree@1.0.5: + '@types/estree@1.0.5': resolution: {integrity: sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==} - dev: true - /@types/linkify-it@3.0.5: + '@types/linkify-it@3.0.5': resolution: {integrity: sha512-yg6E+u0/+Zjva+buc3EIb+29XEg4wltq7cSmd4Uc2EE/1nUVmxyzpX6gUXD0V8jIrG0r7YeOGVIbYRkxeooCtw==} - dev: true - /@types/markdown-it@13.0.7: + '@types/markdown-it@13.0.7': resolution: {integrity: sha512-U/CBi2YUUcTHBt5tjO2r5QV/x0Po6nsYwQU4Y04fBS6vfoImaiZ6f8bi3CjTCxBPQSO1LMyUqkByzi8AidyxfA==} - dependencies: - '@types/linkify-it': 3.0.5 - '@types/mdurl': 1.0.5 - dev: true - /@types/mdurl@1.0.5: + '@types/mdurl@1.0.5': resolution: {integrity: sha512-6L6VymKTzYSrEf4Nev4Xa1LCHKrlTlYCBMTlQKFuddo1CvQcE52I0mwfOJayueUC7MJuXOeHTcIU683lzd0cUA==} - dev: true - /@types/web-bluetooth@0.0.20: + '@types/web-bluetooth@0.0.20': resolution: {integrity: sha512-g9gZnnXVq7gM7v3tJCWV/qw7w+KeOlSHAhgF9RytFyifW6AF61hdT2ucrYhPq9hLs5JIryeupHV3qGk95dH9ow==} - dev: true - /@vitejs/plugin-vue@5.0.3(vite@5.2.8)(vue@3.4.15): + '@vitejs/plugin-vue@5.0.3': resolution: {integrity: sha512-b8S5dVS40rgHdDrw+DQi/xOM9ed+kSRZzfm1T74bMmBDCd8XO87NKlFYInzCtwvtWwXZvo1QxE2OSspTATWrbA==} engines: {node: ^18.0.0 || >=20.0.0} peerDependencies: vite: ^5.0.0 vue: ^3.2.25 - dependencies: - vite: 5.2.8 - vue: 3.4.15 - dev: true - /@vue/compiler-core@3.4.15: + '@vue/compiler-core@3.4.15': resolution: {integrity: sha512-XcJQVOaxTKCnth1vCxEChteGuwG6wqnUHxAm1DO3gCz0+uXKaJNx8/digSz4dLALCy8n2lKq24jSUs8segoqIw==} - dependencies: - '@babel/parser': 7.23.9 - '@vue/shared': 3.4.15 - entities: 4.5.0 - estree-walker: 2.0.2 - source-map-js: 1.0.2 - dev: true - /@vue/compiler-dom@3.4.15: + '@vue/compiler-dom@3.4.15': resolution: {integrity: sha512-wox0aasVV74zoXyblarOM3AZQz/Z+OunYcIHe1OsGclCHt8RsRm04DObjefaI82u6XDzv+qGWZ24tIsRAIi5MQ==} - dependencies: - '@vue/compiler-core': 3.4.15 - '@vue/shared': 3.4.15 - dev: true - /@vue/compiler-sfc@3.4.15: + '@vue/compiler-sfc@3.4.15': resolution: {integrity: sha512-LCn5M6QpkpFsh3GQvs2mJUOAlBQcCco8D60Bcqmf3O3w5a+KWS5GvYbrrJBkgvL1BDnTp+e8q0lXCLgHhKguBA==} - dependencies: - '@babel/parser': 7.23.9 - '@vue/compiler-core': 3.4.15 - '@vue/compiler-dom': 3.4.15 - '@vue/compiler-ssr': 3.4.15 - '@vue/shared': 3.4.15 - estree-walker: 2.0.2 - magic-string: 0.30.5 - postcss: 8.4.33 - source-map-js: 1.0.2 - dev: true - /@vue/compiler-ssr@3.4.15: + '@vue/compiler-ssr@3.4.15': resolution: {integrity: sha512-1jdeQyiGznr8gjFDadVmOJqZiLNSsMa5ZgqavkPZ8O2wjHv0tVuAEsw5hTdUoUW4232vpBbL/wJhzVW/JwY1Uw==} - dependencies: 
- '@vue/compiler-dom': 3.4.15 - '@vue/shared': 3.4.15 - dev: true - /@vue/devtools-api@6.5.1: + '@vue/devtools-api@6.5.1': resolution: {integrity: sha512-+KpckaAQyfbvshdDW5xQylLni1asvNSGme1JFs8I1+/H5pHEhqUKMEQD/qn3Nx5+/nycBq11qAEi8lk+LXI2dA==} - dev: true - /@vue/reactivity@3.4.15: + '@vue/reactivity@3.4.15': resolution: {integrity: sha512-55yJh2bsff20K5O84MxSvXKPHHt17I2EomHznvFiJCAZpJTNW8IuLj1xZWMLELRhBK3kkFV/1ErZGHJfah7i7w==} - dependencies: - '@vue/shared': 3.4.15 - dev: true - /@vue/runtime-core@3.4.15: + '@vue/runtime-core@3.4.15': resolution: {integrity: sha512-6E3by5m6v1AkW0McCeAyhHTw+3y17YCOKG0U0HDKDscV4Hs0kgNT5G+GCHak16jKgcCDHpI9xe5NKb8sdLCLdw==} - dependencies: - '@vue/reactivity': 3.4.15 - '@vue/shared': 3.4.15 - dev: true - /@vue/runtime-dom@3.4.15: + '@vue/runtime-dom@3.4.15': resolution: {integrity: sha512-EVW8D6vfFVq3V/yDKNPBFkZKGMFSvZrUQmx196o/v2tHKdwWdiZjYUBS+0Ez3+ohRyF8Njwy/6FH5gYJ75liUw==} - dependencies: - '@vue/runtime-core': 3.4.15 - '@vue/shared': 3.4.15 - csstype: 3.1.3 - dev: true - /@vue/server-renderer@3.4.15(vue@3.4.15): + '@vue/server-renderer@3.4.15': resolution: {integrity: sha512-3HYzaidu9cHjrT+qGUuDhFYvF/j643bHC6uUN9BgM11DVy+pM6ATsG6uPBLnkwOgs7BpJABReLmpL3ZPAsUaqw==} peerDependencies: vue: 3.4.15 - dependencies: - '@vue/compiler-ssr': 3.4.15 - '@vue/shared': 3.4.15 - vue: 3.4.15 - dev: true - /@vue/shared@3.4.15: + '@vue/shared@3.4.15': resolution: {integrity: sha512-KzfPTxVaWfB+eGcGdbSf4CWdaXcGDqckoeXUh7SB3fZdEtzPCK2Vq9B/lRRL3yutax/LWITz+SwvgyOxz5V75g==} - dev: true - /@vueuse/core@10.7.2(vue@3.4.15): + '@vueuse/core@10.7.2': resolution: {integrity: sha512-AOyAL2rK0By62Hm+iqQn6Rbu8bfmbgaIMXcE3TSr7BdQ42wnSFlwIdPjInO62onYsEMK/yDMU8C6oGfDAtZ2qQ==} - dependencies: - '@types/web-bluetooth': 0.0.20 - '@vueuse/metadata': 10.7.2 - '@vueuse/shared': 10.7.2(vue@3.4.15) - vue-demi: 0.14.6(vue@3.4.15) - transitivePeerDependencies: - - '@vue/composition-api' - - vue - dev: true - /@vueuse/integrations@10.7.2(focus-trap@7.5.4)(vue@3.4.15): + '@vueuse/integrations@10.7.2': resolution: {integrity: sha512-+u3RLPFedjASs5EKPc69Ge49WNgqeMfSxFn+qrQTzblPXZg6+EFzhjarS5edj2qAf6xQ93f95TUxRwKStXj/sQ==} peerDependencies: async-validator: '*' @@ -712,219 +437,107 @@ packages: optional: true universal-cookie: optional: true - dependencies: - '@vueuse/core': 10.7.2(vue@3.4.15) - '@vueuse/shared': 10.7.2(vue@3.4.15) - focus-trap: 7.5.4 - vue-demi: 0.14.6(vue@3.4.15) - transitivePeerDependencies: - - '@vue/composition-api' - - vue - dev: true - /@vueuse/metadata@10.7.2: + '@vueuse/metadata@10.7.2': resolution: {integrity: sha512-kCWPb4J2KGrwLtn1eJwaJD742u1k5h6v/St5wFe8Quih90+k2a0JP8BS4Zp34XUuJqS2AxFYMb1wjUL8HfhWsQ==} - dev: true - /@vueuse/shared@10.7.2(vue@3.4.15): + '@vueuse/shared@10.7.2': resolution: {integrity: sha512-qFbXoxS44pi2FkgFjPvF4h7c9oMDutpyBdcJdMYIMg9XyXli2meFMuaKn+UMgsClo//Th6+beeCgqweT/79BVA==} - dependencies: - vue-demi: 0.14.6(vue@3.4.15) - transitivePeerDependencies: - - '@vue/composition-api' - - vue - dev: true - /algoliasearch@4.22.1: + algoliasearch@4.22.1: resolution: {integrity: sha512-jwydKFQJKIx9kIZ8Jm44SdpigFwRGPESaxZBaHSV0XWN2yBJAOT4mT7ppvlrpA4UGzz92pqFnVKr/kaZXrcreg==} - dependencies: - '@algolia/cache-browser-local-storage': 4.22.1 - '@algolia/cache-common': 4.22.1 - '@algolia/cache-in-memory': 4.22.1 - '@algolia/client-account': 4.22.1 - '@algolia/client-analytics': 4.22.1 - '@algolia/client-common': 4.22.1 - '@algolia/client-personalization': 4.22.1 - '@algolia/client-search': 4.22.1 - '@algolia/logger-common': 4.22.1 - '@algolia/logger-console': 4.22.1 
- '@algolia/requester-browser-xhr': 4.22.1 - '@algolia/requester-common': 4.22.1 - '@algolia/requester-node-http': 4.22.1 - '@algolia/transporter': 4.22.1 - dev: true - /csstype@3.1.3: + csstype@3.1.3: resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} - dev: true - /entities@4.5.0: + entities@4.5.0: resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} engines: {node: '>=0.12'} - dev: true - /esbuild@0.20.2: - resolution: {integrity: sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==} + esbuild@0.21.5: + resolution: {integrity: sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==} engines: {node: '>=12'} hasBin: true - requiresBuild: true - optionalDependencies: - '@esbuild/aix-ppc64': 0.20.2 - '@esbuild/android-arm': 0.20.2 - '@esbuild/android-arm64': 0.20.2 - '@esbuild/android-x64': 0.20.2 - '@esbuild/darwin-arm64': 0.20.2 - '@esbuild/darwin-x64': 0.20.2 - '@esbuild/freebsd-arm64': 0.20.2 - '@esbuild/freebsd-x64': 0.20.2 - '@esbuild/linux-arm': 0.20.2 - '@esbuild/linux-arm64': 0.20.2 - '@esbuild/linux-ia32': 0.20.2 - '@esbuild/linux-loong64': 0.20.2 - '@esbuild/linux-mips64el': 0.20.2 - '@esbuild/linux-ppc64': 0.20.2 - '@esbuild/linux-riscv64': 0.20.2 - '@esbuild/linux-s390x': 0.20.2 - '@esbuild/linux-x64': 0.20.2 - '@esbuild/netbsd-x64': 0.20.2 - '@esbuild/openbsd-x64': 0.20.2 - '@esbuild/sunos-x64': 0.20.2 - '@esbuild/win32-arm64': 0.20.2 - '@esbuild/win32-ia32': 0.20.2 - '@esbuild/win32-x64': 0.20.2 - dev: true - - /estree-walker@2.0.2: + + estree-walker@2.0.2: resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} - dev: true - /focus-trap@7.5.4: + focus-trap@7.5.4: resolution: {integrity: sha512-N7kHdlgsO/v+iD/dMoJKtsSqs5Dz/dXZVebRgJw23LDk+jMi/974zyiOYDziY2JPp8xivq9BmUGwIJMiuSBi7w==} - dependencies: - tabbable: 6.2.0 - dev: true - /fsevents@2.3.3: + fsevents@2.3.3: resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} os: [darwin] - requiresBuild: true - dev: true - optional: true - /magic-string@0.30.5: + magic-string@0.30.5: resolution: {integrity: sha512-7xlpfBaQaP/T6Vh8MO/EqXSW5En6INHEvEXQiuff7Gku0PWjU3uf6w/j9o7O+SpB5fOAkrI5HeoNgwjEO0pFsA==} engines: {node: '>=12'} - dependencies: - '@jridgewell/sourcemap-codec': 1.4.15 - dev: true - /mark.js@8.11.1: + mark.js@8.11.1: resolution: {integrity: sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==} - dev: true - /minisearch@6.3.0: + minisearch@6.3.0: resolution: {integrity: sha512-ihFnidEeU8iXzcVHy74dhkxh/dn8Dc08ERl0xwoMMGqp4+LvRSCgicb+zGqWthVokQKvCSxITlh3P08OzdTYCQ==} - dev: true - /nanoid@3.3.7: + nanoid@3.3.7: resolution: {integrity: sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==} engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true - dev: true - /picocolors@1.0.0: + picocolors@1.0.0: resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==} - dev: true - /postcss@8.4.33: + picocolors@1.1.0: + resolution: {integrity: sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==} + + postcss@8.4.33: resolution: {integrity: 
sha512-Kkpbhhdjw2qQs2O2DGX+8m5OVqEcbB9HRBvuYM9pgrjEFUg30A9LmXNlTAUj4S9kgtGyrMbTzVjH7E+s5Re2yg==} engines: {node: ^10 || ^12 || >=14} - dependencies: - nanoid: 3.3.7 - picocolors: 1.0.0 - source-map-js: 1.0.2 - dev: true - /postcss@8.4.38: - resolution: {integrity: sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==} + postcss@8.4.47: + resolution: {integrity: sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==} engines: {node: ^10 || ^12 || >=14} - dependencies: - nanoid: 3.3.7 - picocolors: 1.0.0 - source-map-js: 1.2.0 - dev: true - /preact@10.19.3: + preact@10.19.3: resolution: {integrity: sha512-nHHTeFVBTHRGxJXKkKu5hT8C/YWBkPso4/Gad6xuj5dbptt9iF9NZr9pHbPhBrnT2klheu7mHTxTZ/LjwJiEiQ==} - dev: true - /rollup@4.14.0: - resolution: {integrity: sha512-Qe7w62TyawbDzB4yt32R0+AbIo6m1/sqO7UPzFS8Z/ksL5mrfhA0v4CavfdmFav3D+ub4QeAgsGEe84DoWe/nQ==} + rollup@4.22.4: + resolution: {integrity: sha512-vD8HJ5raRcWOyymsR6Z3o6+RzfEPCnVLMFJ6vRslO1jt4LO6dUo5Qnpg7y4RkZFM2DMe3WUirkI5c16onjrc6A==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true - dependencies: - '@types/estree': 1.0.5 - optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.14.0 - '@rollup/rollup-android-arm64': 4.14.0 - '@rollup/rollup-darwin-arm64': 4.14.0 - '@rollup/rollup-darwin-x64': 4.14.0 - '@rollup/rollup-linux-arm-gnueabihf': 4.14.0 - '@rollup/rollup-linux-arm64-gnu': 4.14.0 - '@rollup/rollup-linux-arm64-musl': 4.14.0 - '@rollup/rollup-linux-powerpc64le-gnu': 4.14.0 - '@rollup/rollup-linux-riscv64-gnu': 4.14.0 - '@rollup/rollup-linux-s390x-gnu': 4.14.0 - '@rollup/rollup-linux-x64-gnu': 4.14.0 - '@rollup/rollup-linux-x64-musl': 4.14.0 - '@rollup/rollup-win32-arm64-msvc': 4.14.0 - '@rollup/rollup-win32-ia32-msvc': 4.14.0 - '@rollup/rollup-win32-x64-msvc': 4.14.0 - fsevents: 2.3.3 - dev: true - /search-insights@2.13.0: + search-insights@2.13.0: resolution: {integrity: sha512-Orrsjf9trHHxFRuo9/rzm0KIWmgzE8RMlZMzuhZOJ01Rnz3D0YBAe+V6473t6/H6c7irs6Lt48brULAiRWb3Vw==} - dev: true - /shikiji-core@0.10.2: + shikiji-core@0.10.2: resolution: {integrity: sha512-9Of8HMlF96usXJHmCL3Gd0Fcf0EcyJUF9m8EoAKKd98mHXi0La2AZl1h6PegSFGtiYcBDK/fLuKbDa1l16r1fA==} - dev: true + deprecated: Shikiji is merged back to Shiki v1.0, please migrate over to get the latest updates - /shikiji-transformers@0.10.2: + shikiji-transformers@0.10.2: resolution: {integrity: sha512-7IVTwl1af205ywYEq5bOAYOTOFW4V1dVX1EablP0nWKErqZeD1o93VMytxmtJomqS+YwbB8doY8SE3MFMn0aPQ==} - dependencies: - shikiji: 0.10.2 - dev: true - /shikiji@0.10.2: + shikiji@0.10.2: resolution: {integrity: sha512-wtZg3T0vtYV2PnqusWQs3mDaJBdCPWxFDrBM/SE5LfrX92gjUvfEMlc+vJnoKY6Z/S44OWaCRzNIsdBRWcTAiw==} - dependencies: - shikiji-core: 0.10.2 - dev: true + deprecated: Shikiji is merged back to Shiki v1.0, please migrate over to get the latest updates - /source-map-js@1.0.2: + source-map-js@1.0.2: resolution: {integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==} engines: {node: '>=0.10.0'} - dev: true - /source-map-js@1.2.0: - resolution: {integrity: sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==} + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} engines: {node: '>=0.10.0'} - dev: true - /tabbable@6.2.0: + tabbable@6.2.0: resolution: {integrity: 
sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==} - dev: true - /to-fast-properties@2.0.0: + to-fast-properties@2.0.0: resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==} engines: {node: '>=4'} - dev: true - /vite@5.2.8: - resolution: {integrity: sha512-OyZR+c1CE8yeHw5V5t59aXsUPPVTHMDjEZz8MgguLL/Q7NblxhZUlTu9xSPqlsUO/y+X7dlU05jdhvyycD55DA==} + vite@5.4.7: + resolution: {integrity: sha512-5l2zxqMEPVENgvzTuBpHer2awaetimj2BGkhBPdnwKbPNOlHsODU+oiazEZzLK7KhAnOrO+XGYJYn4ZlUhDtDQ==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true peerDependencies: @@ -932,6 +545,7 @@ packages: less: '*' lightningcss: ^1.21.0 sass: '*' + sass-embedded: '*' stylus: '*' sugarss: '*' terser: ^5.4.0 @@ -944,21 +558,16 @@ packages: optional: true sass: optional: true + sass-embedded: + optional: true stylus: optional: true sugarss: optional: true terser: optional: true - dependencies: - esbuild: 0.20.2 - postcss: 8.4.38 - rollup: 4.14.0 - optionalDependencies: - fsevents: 2.3.3 - dev: true - /vitepress@1.0.0-rc.40(@algolia/client-search@4.22.1)(search-insights@2.13.0): + vitepress@1.0.0-rc.40: resolution: {integrity: sha512-1x9PCrcsJwqhpccyTR93uD6jpiPDeRC98CBCAQLLBb44a3VSXYBPzhCahi+2kwAYylu49p0XhseMPVM4IVcWcw==} hasBin: true peerDependencies: @@ -969,23 +578,532 @@ packages: optional: true postcss: optional: true + + vue-demi@0.14.6: + resolution: {integrity: sha512-8QA7wrYSHKaYgUxDA5ZC24w+eHm3sYCbp0EzcDwKqN3p6HqtTCGR/GVsPyZW92unff4UlcSh++lmqDWN3ZIq4w==} + engines: {node: '>=12'} + hasBin: true + peerDependencies: + '@vue/composition-api': ^1.0.0-rc.1 + vue: ^3.0.0-0 || ^2.6.0 + peerDependenciesMeta: + '@vue/composition-api': + optional: true + + vue@3.4.15: + resolution: {integrity: sha512-jC0GH4KkWLWJOEQjOpkqU1bQsBwf4R1rsFtw5GQJbjHVKWDzO6P0nWWBTmjp1xSemAioDFj1jdaK1qa3DnMQoQ==} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + +snapshots: + + '@algolia/autocomplete-core@1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1)(search-insights@2.13.0)': dependencies: - '@docsearch/css': 3.5.2 - '@docsearch/js': 3.5.2(@algolia/client-search@4.22.1)(search-insights@2.13.0) - '@types/markdown-it': 13.0.7 - '@vitejs/plugin-vue': 5.0.3(vite@5.2.8)(vue@3.4.15) - '@vue/devtools-api': 6.5.1 - '@vueuse/core': 10.7.2(vue@3.4.15) - '@vueuse/integrations': 10.7.2(focus-trap@7.5.4)(vue@3.4.15) - focus-trap: 7.5.4 - mark.js: 8.11.1 - minisearch: 6.3.0 - shikiji: 0.10.2 - shikiji-core: 0.10.2 - shikiji-transformers: 0.10.2 - vite: 5.2.8 - vue: 3.4.15 - transitivePeerDependencies: + '@algolia/autocomplete-plugin-algolia-insights': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1)(search-insights@2.13.0) + '@algolia/autocomplete-shared': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1) + transitivePeerDependencies: + - '@algolia/client-search' + - algoliasearch + - search-insights + + '@algolia/autocomplete-plugin-algolia-insights@1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1)(search-insights@2.13.0)': + dependencies: + '@algolia/autocomplete-shared': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1) + search-insights: 2.13.0 + transitivePeerDependencies: + - '@algolia/client-search' + - algoliasearch + + '@algolia/autocomplete-preset-algolia@1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1)': + dependencies: + '@algolia/autocomplete-shared': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1) + 
'@algolia/client-search': 4.22.1 + algoliasearch: 4.22.1 + + '@algolia/autocomplete-shared@1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1)': + dependencies: + '@algolia/client-search': 4.22.1 + algoliasearch: 4.22.1 + + '@algolia/cache-browser-local-storage@4.22.1': + dependencies: + '@algolia/cache-common': 4.22.1 + + '@algolia/cache-common@4.22.1': {} + + '@algolia/cache-in-memory@4.22.1': + dependencies: + '@algolia/cache-common': 4.22.1 + + '@algolia/client-account@4.22.1': + dependencies: + '@algolia/client-common': 4.22.1 + '@algolia/client-search': 4.22.1 + '@algolia/transporter': 4.22.1 + + '@algolia/client-analytics@4.22.1': + dependencies: + '@algolia/client-common': 4.22.1 + '@algolia/client-search': 4.22.1 + '@algolia/requester-common': 4.22.1 + '@algolia/transporter': 4.22.1 + + '@algolia/client-common@4.22.1': + dependencies: + '@algolia/requester-common': 4.22.1 + '@algolia/transporter': 4.22.1 + + '@algolia/client-personalization@4.22.1': + dependencies: + '@algolia/client-common': 4.22.1 + '@algolia/requester-common': 4.22.1 + '@algolia/transporter': 4.22.1 + + '@algolia/client-search@4.22.1': + dependencies: + '@algolia/client-common': 4.22.1 + '@algolia/requester-common': 4.22.1 + '@algolia/transporter': 4.22.1 + + '@algolia/logger-common@4.22.1': {} + + '@algolia/logger-console@4.22.1': + dependencies: + '@algolia/logger-common': 4.22.1 + + '@algolia/requester-browser-xhr@4.22.1': + dependencies: + '@algolia/requester-common': 4.22.1 + + '@algolia/requester-common@4.22.1': {} + + '@algolia/requester-node-http@4.22.1': + dependencies: + '@algolia/requester-common': 4.22.1 + + '@algolia/transporter@4.22.1': + dependencies: + '@algolia/cache-common': 4.22.1 + '@algolia/logger-common': 4.22.1 + '@algolia/requester-common': 4.22.1 + + '@babel/helper-string-parser@7.23.4': {} + + '@babel/helper-validator-identifier@7.22.20': {} + + '@babel/parser@7.23.9': + dependencies: + '@babel/types': 7.23.9 + + '@babel/types@7.23.9': + dependencies: + '@babel/helper-string-parser': 7.23.4 + '@babel/helper-validator-identifier': 7.22.20 + to-fast-properties: 2.0.0 + + '@docsearch/css@3.5.2': {} + + '@docsearch/js@3.5.2(@algolia/client-search@4.22.1)(search-insights@2.13.0)': + dependencies: + '@docsearch/react': 3.5.2(@algolia/client-search@4.22.1)(search-insights@2.13.0) + preact: 10.19.3 + transitivePeerDependencies: + - '@algolia/client-search' + - '@types/react' + - react + - react-dom + - search-insights + + '@docsearch/react@3.5.2(@algolia/client-search@4.22.1)(search-insights@2.13.0)': + dependencies: + '@algolia/autocomplete-core': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1)(search-insights@2.13.0) + '@algolia/autocomplete-preset-algolia': 1.9.3(@algolia/client-search@4.22.1)(algoliasearch@4.22.1) + '@docsearch/css': 3.5.2 + algoliasearch: 4.22.1 + optionalDependencies: + search-insights: 2.13.0 + transitivePeerDependencies: + - '@algolia/client-search' + + '@esbuild/aix-ppc64@0.21.5': + optional: true + + '@esbuild/android-arm64@0.21.5': + optional: true + + '@esbuild/android-arm@0.21.5': + optional: true + + '@esbuild/android-x64@0.21.5': + optional: true + + '@esbuild/darwin-arm64@0.21.5': + optional: true + + '@esbuild/darwin-x64@0.21.5': + optional: true + + '@esbuild/freebsd-arm64@0.21.5': + optional: true + + '@esbuild/freebsd-x64@0.21.5': + optional: true + + '@esbuild/linux-arm64@0.21.5': + optional: true + + '@esbuild/linux-arm@0.21.5': + optional: true + + '@esbuild/linux-ia32@0.21.5': + optional: true + + '@esbuild/linux-loong64@0.21.5': + 
optional: true + + '@esbuild/linux-mips64el@0.21.5': + optional: true + + '@esbuild/linux-ppc64@0.21.5': + optional: true + + '@esbuild/linux-riscv64@0.21.5': + optional: true + + '@esbuild/linux-s390x@0.21.5': + optional: true + + '@esbuild/linux-x64@0.21.5': + optional: true + + '@esbuild/netbsd-x64@0.21.5': + optional: true + + '@esbuild/openbsd-x64@0.21.5': + optional: true + + '@esbuild/sunos-x64@0.21.5': + optional: true + + '@esbuild/win32-arm64@0.21.5': + optional: true + + '@esbuild/win32-ia32@0.21.5': + optional: true + + '@esbuild/win32-x64@0.21.5': + optional: true + + '@jridgewell/sourcemap-codec@1.4.15': {} + + '@rollup/rollup-android-arm-eabi@4.22.4': + optional: true + + '@rollup/rollup-android-arm64@4.22.4': + optional: true + + '@rollup/rollup-darwin-arm64@4.22.4': + optional: true + + '@rollup/rollup-darwin-x64@4.22.4': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.22.4': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.22.4': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.22.4': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.22.4': + optional: true + + '@rollup/rollup-linux-powerpc64le-gnu@4.22.4': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.22.4': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.22.4': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.22.4': + optional: true + + '@rollup/rollup-linux-x64-musl@4.22.4': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.22.4': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.22.4': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.22.4': + optional: true + + '@types/estree@1.0.5': {} + + '@types/linkify-it@3.0.5': {} + + '@types/markdown-it@13.0.7': + dependencies: + '@types/linkify-it': 3.0.5 + '@types/mdurl': 1.0.5 + + '@types/mdurl@1.0.5': {} + + '@types/web-bluetooth@0.0.20': {} + + '@vitejs/plugin-vue@5.0.3(vite@5.4.7)(vue@3.4.15)': + dependencies: + vite: 5.4.7 + vue: 3.4.15 + + '@vue/compiler-core@3.4.15': + dependencies: + '@babel/parser': 7.23.9 + '@vue/shared': 3.4.15 + entities: 4.5.0 + estree-walker: 2.0.2 + source-map-js: 1.0.2 + + '@vue/compiler-dom@3.4.15': + dependencies: + '@vue/compiler-core': 3.4.15 + '@vue/shared': 3.4.15 + + '@vue/compiler-sfc@3.4.15': + dependencies: + '@babel/parser': 7.23.9 + '@vue/compiler-core': 3.4.15 + '@vue/compiler-dom': 3.4.15 + '@vue/compiler-ssr': 3.4.15 + '@vue/shared': 3.4.15 + estree-walker: 2.0.2 + magic-string: 0.30.5 + postcss: 8.4.33 + source-map-js: 1.0.2 + + '@vue/compiler-ssr@3.4.15': + dependencies: + '@vue/compiler-dom': 3.4.15 + '@vue/shared': 3.4.15 + + '@vue/devtools-api@6.5.1': {} + + '@vue/reactivity@3.4.15': + dependencies: + '@vue/shared': 3.4.15 + + '@vue/runtime-core@3.4.15': + dependencies: + '@vue/reactivity': 3.4.15 + '@vue/shared': 3.4.15 + + '@vue/runtime-dom@3.4.15': + dependencies: + '@vue/runtime-core': 3.4.15 + '@vue/shared': 3.4.15 + csstype: 3.1.3 + + '@vue/server-renderer@3.4.15(vue@3.4.15)': + dependencies: + '@vue/compiler-ssr': 3.4.15 + '@vue/shared': 3.4.15 + vue: 3.4.15 + + '@vue/shared@3.4.15': {} + + '@vueuse/core@10.7.2(vue@3.4.15)': + dependencies: + '@types/web-bluetooth': 0.0.20 + '@vueuse/metadata': 10.7.2 + '@vueuse/shared': 10.7.2(vue@3.4.15) + vue-demi: 0.14.6(vue@3.4.15) + transitivePeerDependencies: + - '@vue/composition-api' + - vue + + '@vueuse/integrations@10.7.2(focus-trap@7.5.4)(vue@3.4.15)': + dependencies: + '@vueuse/core': 10.7.2(vue@3.4.15) + '@vueuse/shared': 10.7.2(vue@3.4.15) + vue-demi: 0.14.6(vue@3.4.15) + optionalDependencies: + 
focus-trap: 7.5.4 + transitivePeerDependencies: + - '@vue/composition-api' + - vue + + '@vueuse/metadata@10.7.2': {} + + '@vueuse/shared@10.7.2(vue@3.4.15)': + dependencies: + vue-demi: 0.14.6(vue@3.4.15) + transitivePeerDependencies: + - '@vue/composition-api' + - vue + + algoliasearch@4.22.1: + dependencies: + '@algolia/cache-browser-local-storage': 4.22.1 + '@algolia/cache-common': 4.22.1 + '@algolia/cache-in-memory': 4.22.1 + '@algolia/client-account': 4.22.1 + '@algolia/client-analytics': 4.22.1 + '@algolia/client-common': 4.22.1 + '@algolia/client-personalization': 4.22.1 + '@algolia/client-search': 4.22.1 + '@algolia/logger-common': 4.22.1 + '@algolia/logger-console': 4.22.1 + '@algolia/requester-browser-xhr': 4.22.1 + '@algolia/requester-common': 4.22.1 + '@algolia/requester-node-http': 4.22.1 + '@algolia/transporter': 4.22.1 + + csstype@3.1.3: {} + + entities@4.5.0: {} + + esbuild@0.21.5: + optionalDependencies: + '@esbuild/aix-ppc64': 0.21.5 + '@esbuild/android-arm': 0.21.5 + '@esbuild/android-arm64': 0.21.5 + '@esbuild/android-x64': 0.21.5 + '@esbuild/darwin-arm64': 0.21.5 + '@esbuild/darwin-x64': 0.21.5 + '@esbuild/freebsd-arm64': 0.21.5 + '@esbuild/freebsd-x64': 0.21.5 + '@esbuild/linux-arm': 0.21.5 + '@esbuild/linux-arm64': 0.21.5 + '@esbuild/linux-ia32': 0.21.5 + '@esbuild/linux-loong64': 0.21.5 + '@esbuild/linux-mips64el': 0.21.5 + '@esbuild/linux-ppc64': 0.21.5 + '@esbuild/linux-riscv64': 0.21.5 + '@esbuild/linux-s390x': 0.21.5 + '@esbuild/linux-x64': 0.21.5 + '@esbuild/netbsd-x64': 0.21.5 + '@esbuild/openbsd-x64': 0.21.5 + '@esbuild/sunos-x64': 0.21.5 + '@esbuild/win32-arm64': 0.21.5 + '@esbuild/win32-ia32': 0.21.5 + '@esbuild/win32-x64': 0.21.5 + + estree-walker@2.0.2: {} + + focus-trap@7.5.4: + dependencies: + tabbable: 6.2.0 + + fsevents@2.3.3: + optional: true + + magic-string@0.30.5: + dependencies: + '@jridgewell/sourcemap-codec': 1.4.15 + + mark.js@8.11.1: {} + + minisearch@6.3.0: {} + + nanoid@3.3.7: {} + + picocolors@1.0.0: {} + + picocolors@1.1.0: {} + + postcss@8.4.33: + dependencies: + nanoid: 3.3.7 + picocolors: 1.0.0 + source-map-js: 1.0.2 + + postcss@8.4.47: + dependencies: + nanoid: 3.3.7 + picocolors: 1.1.0 + source-map-js: 1.2.1 + + preact@10.19.3: {} + + rollup@4.22.4: + dependencies: + '@types/estree': 1.0.5 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.22.4 + '@rollup/rollup-android-arm64': 4.22.4 + '@rollup/rollup-darwin-arm64': 4.22.4 + '@rollup/rollup-darwin-x64': 4.22.4 + '@rollup/rollup-linux-arm-gnueabihf': 4.22.4 + '@rollup/rollup-linux-arm-musleabihf': 4.22.4 + '@rollup/rollup-linux-arm64-gnu': 4.22.4 + '@rollup/rollup-linux-arm64-musl': 4.22.4 + '@rollup/rollup-linux-powerpc64le-gnu': 4.22.4 + '@rollup/rollup-linux-riscv64-gnu': 4.22.4 + '@rollup/rollup-linux-s390x-gnu': 4.22.4 + '@rollup/rollup-linux-x64-gnu': 4.22.4 + '@rollup/rollup-linux-x64-musl': 4.22.4 + '@rollup/rollup-win32-arm64-msvc': 4.22.4 + '@rollup/rollup-win32-ia32-msvc': 4.22.4 + '@rollup/rollup-win32-x64-msvc': 4.22.4 + fsevents: 2.3.3 + + search-insights@2.13.0: {} + + shikiji-core@0.10.2: {} + + shikiji-transformers@0.10.2: + dependencies: + shikiji: 0.10.2 + + shikiji@0.10.2: + dependencies: + shikiji-core: 0.10.2 + + source-map-js@1.0.2: {} + + source-map-js@1.2.1: {} + + tabbable@6.2.0: {} + + to-fast-properties@2.0.0: {} + + vite@5.4.7: + dependencies: + esbuild: 0.21.5 + postcss: 8.4.47 + rollup: 4.22.4 + optionalDependencies: + fsevents: 2.3.3 + + vitepress@1.0.0-rc.40(@algolia/client-search@4.22.1)(postcss@8.4.47)(search-insights@2.13.0): + 
dependencies: + '@docsearch/css': 3.5.2 + '@docsearch/js': 3.5.2(@algolia/client-search@4.22.1)(search-insights@2.13.0) + '@types/markdown-it': 13.0.7 + '@vitejs/plugin-vue': 5.0.3(vite@5.4.7)(vue@3.4.15) + '@vue/devtools-api': 6.5.1 + '@vueuse/core': 10.7.2(vue@3.4.15) + '@vueuse/integrations': 10.7.2(focus-trap@7.5.4)(vue@3.4.15) + focus-trap: 7.5.4 + mark.js: 8.11.1 + minisearch: 6.3.0 + shikiji: 0.10.2 + shikiji-core: 0.10.2 + shikiji-transformers: 0.10.2 + vite: 5.4.7 + vue: 3.4.15 + optionalDependencies: + postcss: 8.4.47 + transitivePeerDependencies: - '@algolia/client-search' - '@types/node' - '@types/react' @@ -1004,6 +1122,7 @@ packages: - react - react-dom - sass + - sass-embedded - search-insights - sortablejs - stylus @@ -1011,34 +1130,15 @@ packages: - terser - typescript - universal-cookie - dev: true - /vue-demi@0.14.6(vue@3.4.15): - resolution: {integrity: sha512-8QA7wrYSHKaYgUxDA5ZC24w+eHm3sYCbp0EzcDwKqN3p6HqtTCGR/GVsPyZW92unff4UlcSh++lmqDWN3ZIq4w==} - engines: {node: '>=12'} - hasBin: true - requiresBuild: true - peerDependencies: - '@vue/composition-api': ^1.0.0-rc.1 - vue: ^3.0.0-0 || ^2.6.0 - peerDependenciesMeta: - '@vue/composition-api': - optional: true + vue-demi@0.14.6(vue@3.4.15): dependencies: vue: 3.4.15 - dev: true - /vue@3.4.15: - resolution: {integrity: sha512-jC0GH4KkWLWJOEQjOpkqU1bQsBwf4R1rsFtw5GQJbjHVKWDzO6P0nWWBTmjp1xSemAioDFj1jdaK1qa3DnMQoQ==} - peerDependencies: - typescript: '*' - peerDependenciesMeta: - typescript: - optional: true + vue@3.4.15: dependencies: '@vue/compiler-dom': 3.4.15 '@vue/compiler-sfc': 3.4.15 '@vue/runtime-dom': 3.4.15 '@vue/server-renderer': 3.4.15(vue@3.4.15) '@vue/shared': 3.4.15 - dev: true diff --git a/docs_new/rbac.md b/docs_new/rbac.md new file mode 100644 index 0000000000..fd83a44588 --- /dev/null +++ b/docs_new/rbac.md @@ -0,0 +1,65 @@ +# RBAC Configuration {#rbac} + +The `edit` `ClusterRole` is a built-in Kubernetes system role that offers +permissions to modify most objects within a namespace, excluding roles, +role bindings, and resource quotas. This role allows access to create, update, +delete, and view resources such as Deployments, Pods, Services, ConfigMaps, +PersistentVolumeClaims, and more. The Kanister Helm chart used to assign the +`edit` `ClusterRole` to the Kanister service account, which granted all the +permissions mentioned in the `edit` `ClusterRole` to the Kanister application. + +To enhance security, the `edit` `ClusterRoleBinding` has been removed from +the Kanister Helm Chart. Users are required to create their own +`Role`/`RoleBinding` in the application's namespace to grant the necessary +permissions to Kanister's Service Account, providing more control over +the specific permissions granted. + + +## Creating a RoleBinding with edit ClusterRole + +To allow Kanister to perform backup/restore operations in the application +namespace, create a `RoleBinding` in the application namespace that assigns +the `edit` `ClusterRole` to Kanister's Service Account: + +```bash +kubectl create rolebinding kanister-edit-binding --clusterrole=edit \ +--serviceaccount=:-kanister-operator \ +--namespace= +``` + +## Creating a Role with Granular Permissions + +If the Blueprint doesn't require access to all the resources that are included +in the `edit` ClusterRole, you can create a `Role` in the application namespace +with just the specific resources and verbs that Blueprint needs, and a `RoleBinding` +in the application namespace to bind the `Role` to Kanister's Service Account. 
+This approach enhances security by granting only the necessary permissions. + +1. Create a `Role` with the permissions required by the Blueprint: + + ```yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: kanister-role + namespace: + rules: + - apiGroups: [""] + resources: ["pods", "pods/log", "persistentvolumeclaims", "secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["apps"] + resources: ["deployments", "statefulsets"] + verbs: ["get", "list", "watch"] + ``` + +2. Create a `RoleBinding` to bind the `Role` to Kanister's Service Account: + + ```bash + kubectl create rolebinding kanister-role-binding --role=kanister-role \ + --serviceaccount=:-kanister-operator \ + --namespace= + ``` + +After setting up the required `Role`/`RoleBinding`, Kanister will be able +to successfully perform snapshot and restore operations in the application's +namespace. \ No newline at end of file diff --git a/docs_new/templates.md b/docs_new/templates.md index 9a61e6cd58..c750409ca7 100644 --- a/docs_new/templates.md +++ b/docs_new/templates.md @@ -59,9 +59,8 @@ The TemplateParams struct has one field for each well known object type, which is effectively a union in go. Other than the types mentioned above, Kanister can also act on any -Kubernetes object such as a CRD and the `object`{.interpreted-text -role="ref"} field in TemplateParams is populated with the unstructured -content of those. +Kubernetes object such as a CRD and the [object](#object) field in +TemplateParams is populated with the unstructured content of those. Each param struct described below is a set of useful fields related to the Object. diff --git a/docs_new/tutorial.md b/docs_new/tutorial.md index dd8e9f9d0a..89655f0945 100644 --- a/docs_new/tutorial.md +++ b/docs_new/tutorial.md @@ -11,10 +11,10 @@ use more of Kanister's features to manage the application's data. - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) installed and setup - [helm](https://helm.sh) installed and initialized using the command - [helm init]{.title-ref} + `helm init` - docker -- A running Kanister controller. See `install`{.interpreted-text - role="ref"} +- A running Kanister controller. See [install](install) +- Proper RBAC configured for the Kanister controller. See [RBAC Configuration](rbac) - Access to an S3 bucket and credentials. ## Example Application @@ -33,7 +33,10 @@ kind: Deployment metadata: name: time-logger spec: - replicas: 1 + replicas: 1 + selector: + matchLabels: + app: time-logger template: metadata: labels: @@ -185,8 +188,7 @@ choose where to store the log based on values in a ConfigMap. ConfigMaps are referenced in an ActionSet, which are fetched by the controller and made available to Blueprints through parameter templating. -For more on templating in Kanister, see `templates`{.interpreted-text -role="ref"}. +For more on templating in Kanister, see [templates](templates). In this section of the tutorial, we\'re going to use a ConfigMap to choose where to backup our time log. We\'ll name our ConfigMap and @@ -313,8 +315,7 @@ the ConfigMap, we can also push the log to S3. In this Secret, we store the credentials as binary data. We can use the templating engine `toString` and `quote` functions, courtesy of sprig. 
-For more on this templating, see `templates`{.interpreted-text -role="ref"} +For more on this templating, see [templates](templates) ``` yaml cat <` +* Kanister controller version 0.111.0 installed in your cluster, let's say in namespace `` * Kanctl CLI installed (https://docs.kanister.io/tooling.html#kanctl) To install kanister and related tools you can follow [this](https://docs.kanister.io/install.html#install) link. @@ -29,7 +29,7 @@ $ helm repo add bitnami https://charts.bitnami.com/bitnami $ helm repo update # remove app-namespace with the namespace you want to deploy the Cassandra app in $ kubectl create ns -$ helm install cassandra bitnami/cassandra --namespace --set image.repository=kanisterio/cassandra --set image.tag=0.110.0 --set cluster.replicaCount=2 --set image.registry=ghcr.io --set image.pullPolicy=Always +$ helm install cassandra bitnami/cassandra --namespace --set image.repository=kanisterio/cassandra --set image.tag=0.111.0 --set cluster.replicaCount=2 --set image.registry=ghcr.io --set image.pullPolicy=Always ``` diff --git a/examples/cassandra/cassandra-blueprint.yaml b/examples/cassandra/cassandra-blueprint.yaml index af2453f7d9..a4a4ee31d0 100644 --- a/examples/cassandra/cassandra-blueprint.yaml +++ b/examples/cassandra/cassandra-blueprint.yaml @@ -130,7 +130,7 @@ actions: name: restoreFromObjectStore args: namespace: "{{ .StatefulSet.Namespace }}" - image: ghcr.io/kanisterio/kanister-tools:0.110.0 + image: ghcr.io/kanisterio/kanister-tools:0.111.0 backupArtifactPrefix: "{{ .ArtifactsIn.params.KeyValue.backupPrefixLocation }}" pods: "{{ range .StatefulSet.Pods }} {{.}}{{end}}" restorePath: "{{ .ArtifactsIn.params.KeyValue.restorePathPrefix }}" diff --git a/examples/cockroachdb/cockroachdb-blueprint.yaml b/examples/cockroachdb/cockroachdb-blueprint.yaml index 7e6f91746b..69912b8b40 100644 --- a/examples/cockroachdb/cockroachdb-blueprint.yaml +++ b/examples/cockroachdb/cockroachdb-blueprint.yaml @@ -151,7 +151,7 @@ actions: - func: KubeTask name: deleteFromS3Store args: - image: ghcr.io/kanisterio/kanister-tools:0.110.0 + image: ghcr.io/kanisterio/kanister-tools:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/couchbase/blueprint-v2/couchbase-blueprint.yaml b/examples/couchbase/blueprint-v2/couchbase-blueprint.yaml index 1bf549926a..cb3ff7967d 100644 --- a/examples/couchbase/blueprint-v2/couchbase-blueprint.yaml +++ b/examples/couchbase/blueprint-v2/couchbase-blueprint.yaml @@ -21,7 +21,7 @@ actions: namespace: "{{ .Object.metadata.namespace }}" args: namespace: "{{ .Object.metadata.namespace }}" - image: ghcr.io/kanisterio/couchbase-tools:0.110.0 + image: ghcr.io/kanisterio/couchbase-tools:0.111.0 command: - bash - -o @@ -58,7 +58,7 @@ actions: namespace: "{{ .Object.metadata.namespace }}" args: namespace: "{{ .Object.metadata.namespace }}" - image: ghcr.io/kanisterio/couchbase-tools:0.110.0 + image: ghcr.io/kanisterio/couchbase-tools:0.111.0 command: - bash - -o @@ -89,7 +89,7 @@ actions: name: deleteBackup args: namespace: "{{ .Namespace.Name }}" - image: ghcr.io/kanisterio/couchbase-tools:0.110.0 + image: ghcr.io/kanisterio/couchbase-tools:0.111.0 command: - bash - -o diff --git a/examples/couchbase/couchbase-blueprint.yaml b/examples/couchbase/couchbase-blueprint.yaml index 3b08469b0a..b41670dcc7 100644 --- a/examples/couchbase/couchbase-blueprint.yaml +++ b/examples/couchbase/couchbase-blueprint.yaml @@ -79,7 +79,7 @@ actions: name: deleteBackup args: namespace: "{{ .Namespace.Name }}" - image: 
ghcr.io/kanisterio/kanister-tools:0.110.0 + image: ghcr.io/kanisterio/kanister-tools:0.111.0 command: - bash - -o diff --git a/examples/csi-snapshot/README.md b/examples/csi-snapshot/README.md index 5c187728a7..9879378ad8 100644 --- a/examples/csi-snapshot/README.md +++ b/examples/csi-snapshot/README.md @@ -8,7 +8,7 @@ This example demonstrates Kanister's ability to protect an application called Ti - Helm 3 installed - Kubernetes 1.16+ with Beta APIs enabled -- Kanister controller version 0.110.0 installed in the cluster, let's assume in namespace `kanister` +- Kanister controller version 0.111.0 installed in the cluster, let's assume in namespace `kanister` - Kanctl CLI installed (https://docs.kanister.io/tooling.html#install-the-tools) - VolumeSnapshot CRDs, Snapshot Controller & a CSI Driver diff --git a/examples/elasticsearch/README.md b/examples/elasticsearch/README.md index 4027dfb804..ebb6e6ffe6 100644 --- a/examples/elasticsearch/README.md +++ b/examples/elasticsearch/README.md @@ -20,7 +20,7 @@ moving on to Elasticsearch 6.0. * Kubernetes 1.20+ * PV provisioner support in the underlying infrastructure -* Kanister controller version 0.110.0 installed in your cluster +* Kanister controller version 0.111.0 installed in your cluster * Kanctl CLI installed (https://docs.kanister.io/tooling.html#install-the-tools) ## StatefulSets Details @@ -74,7 +74,7 @@ Add Kanister Helm repository and install Kanister operator ```bash $ helm repo add kanister https://charts.kanister.io $ helm install kanister --namespace kanister --create-namespace \ - kanister/kanister-operator --set image.tag=0.110.0 + kanister/kanister-operator --set image.tag=0.111.0 ``` ### Create Profile diff --git a/examples/elasticsearch/blueprint-v2/elasticsearch-blueprint.yaml b/examples/elasticsearch/blueprint-v2/elasticsearch-blueprint.yaml index 37f508b038..d9dee8016c 100644 --- a/examples/elasticsearch/blueprint-v2/elasticsearch-blueprint.yaml +++ b/examples/elasticsearch/blueprint-v2/elasticsearch-blueprint.yaml @@ -20,7 +20,7 @@ actions: namespace: "{{ .StatefulSet.Namespace }}" args: namespace: "{{ .StatefulSet.Namespace }}" - image: "ghcr.io/kanisterio/es-sidecar:0.110.0" + image: "ghcr.io/kanisterio/es-sidecar:0.111.0" command: - bash - -o @@ -50,7 +50,7 @@ actions: namespace: "{{ .StatefulSet.Namespace }}" args: namespace: "{{ .StatefulSet.Namespace }}" - image: "ghcr.io/kanisterio/es-sidecar:0.110.0" + image: "ghcr.io/kanisterio/es-sidecar:0.111.0" command: - bash - -o @@ -75,7 +75,7 @@ actions: name: deleteFromStore args: namespace: "{{ .Namespace.Name }}" - image: "ghcr.io/kanisterio/es-sidecar:0.110.0" + image: "ghcr.io/kanisterio/es-sidecar:0.111.0" command: - bash - -o diff --git a/examples/elasticsearch/elasticsearch-blueprint.yaml b/examples/elasticsearch/elasticsearch-blueprint.yaml index cd38a467fc..fc81dca1ab 100644 --- a/examples/elasticsearch/elasticsearch-blueprint.yaml +++ b/examples/elasticsearch/elasticsearch-blueprint.yaml @@ -18,7 +18,7 @@ actions: namespace: "{{ .StatefulSet.Namespace }}" args: namespace: "{{ .StatefulSet.Namespace }}" - image: "ghcr.io/kanisterio/es-sidecar:0.110.0" + image: "ghcr.io/kanisterio/es-sidecar:0.111.0" command: - bash - -o @@ -48,7 +48,7 @@ actions: namespace: "{{ .StatefulSet.Namespace }}" args: namespace: "{{ .StatefulSet.Namespace }}" - image: "ghcr.io/kanisterio/es-sidecar:0.110.0" + image: "ghcr.io/kanisterio/es-sidecar:0.111.0" command: - bash - -o @@ -69,7 +69,7 @@ actions: name: deleteFromObjectStore args: namespace: "{{ .Namespace.Name }}" - image: 
"ghcr.io/kanisterio/es-sidecar:0.110.0" + image: "ghcr.io/kanisterio/es-sidecar:0.111.0" command: - bash - -o diff --git a/examples/etcd/etcd-in-cluster/k8s/etcd-incluster-blueprint.yaml b/examples/etcd/etcd-in-cluster/k8s/etcd-incluster-blueprint.yaml index 175ec2cf4c..08311ceca1 100644 --- a/examples/etcd/etcd-in-cluster/k8s/etcd-incluster-blueprint.yaml +++ b/examples/etcd/etcd-in-cluster/k8s/etcd-incluster-blueprint.yaml @@ -12,7 +12,7 @@ actions: - func: KubeTask name: takeSnapshot args: - image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.110.0 + image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.111.0 command: - sh - -o @@ -37,7 +37,7 @@ actions: - func: KubeTask name: uploadSnapshot args: - image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.110.0 + image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.111.0 command: - sh - -o @@ -55,7 +55,7 @@ actions: - func: KubeTask name: removeSnapshot args: - image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.110.0 + image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.111.0 command: - sh - -o @@ -74,7 +74,7 @@ actions: name: deleteFromObjectStore args: namespace: "{{ .Namespace.Name }}" - image: "ghcr.io/kanisterio/kanister-tools:0.110.0" + image: "ghcr.io/kanisterio/kanister-tools:0.111.0" command: - bash - -o diff --git a/examples/etcd/etcd-in-cluster/ocp/blueprint-v2/etcd-incluster-ocp-blueprint.yaml b/examples/etcd/etcd-in-cluster/ocp/blueprint-v2/etcd-incluster-ocp-blueprint.yaml index ebfdcc22e9..f13e90527a 100644 --- a/examples/etcd/etcd-in-cluster/ocp/blueprint-v2/etcd-incluster-ocp-blueprint.yaml +++ b/examples/etcd/etcd-in-cluster/ocp/blueprint-v2/etcd-incluster-ocp-blueprint.yaml @@ -11,7 +11,7 @@ actions: - func: KubeTask name: takeSnapshot args: - image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.110.0 + image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.111.0 command: - sh - -o @@ -34,7 +34,7 @@ actions: - func: KubeTask name: uploadSnapshot args: - image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.110.0 + image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.111.0 command: - sh - -o @@ -50,7 +50,7 @@ actions: - func: KubeTask name: removeSnapshot args: - image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.110.0 + image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.111.0 command: - sh - -o @@ -73,7 +73,7 @@ actions: - func: PrepareData name: copyFromObjectStore args: - image: "ghcr.io/kanisterio/kanister-tools:0.110.0" + image: "ghcr.io/kanisterio/kanister-tools:0.111.0" namespace: "{{ .Object.metadata.namespace }}" podOverride: nodeSelector: @@ -108,7 +108,7 @@ actions: name: deleteFromObjectStore args: namespace: "{{ .Namespace.Name }}" - image: "ghcr.io/kanisterio/kanister-tools:0.110.0" + image: "ghcr.io/kanisterio/kanister-tools:0.111.0" command: - bash - -o diff --git a/examples/etcd/etcd-in-cluster/ocp/etcd-incluster-ocp-blueprint.yaml b/examples/etcd/etcd-in-cluster/ocp/etcd-incluster-ocp-blueprint.yaml index b479bcb871..5cb29bd5f5 100644 --- a/examples/etcd/etcd-in-cluster/ocp/etcd-incluster-ocp-blueprint.yaml +++ b/examples/etcd/etcd-in-cluster/ocp/etcd-incluster-ocp-blueprint.yaml @@ -12,7 +12,7 @@ actions: - func: KubeTask name: takeSnapshot args: - image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.110.0 + image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.111.0 command: - sh - -o @@ -35,7 +35,7 @@ actions: - func: KubeTask name: uploadSnapshot args: - image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.110.0 + image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.111.0 command: - sh - -o @@ -53,7 +53,7 @@ actions: - func: KubeTask name: 
removeSnapshot args: - image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.110.0 + image: ghcr.io/kanisterio/kanister-kubectl-1.18:0.111.0 command: - sh - -o @@ -72,7 +72,7 @@ actions: name: deleteFromObjectStore args: namespace: "{{ .Namespace.Name }}" - image: "ghcr.io/kanisterio/kanister-tools:0.110.0" + image: "ghcr.io/kanisterio/kanister-tools:0.111.0" command: - bash - -o diff --git a/examples/foundationdb/README.md b/examples/foundationdb/README.md index f0267f52ad..4dc2855f94 100644 --- a/examples/foundationdb/README.md +++ b/examples/foundationdb/README.md @@ -24,7 +24,7 @@ cluster. on you cluster. * Kubernetes 1.9+ with Beta APIs enabled. * PV support on the underlying infrastructure. -* Kanister version 0.110.0 with `profiles.cr.kanister.io` CRD installed. +* Kanister version 0.111.0 with `profiles.cr.kanister.io` CRD installed. * Docker CLI installed * A docker image containing the required tools to back up FoundationDB. The Dockerfile for the image can be found [here](https://raw.githubusercontent.com/kanisterio/kanister/master/docker/foundationdb/Dockerfile). diff --git a/examples/foundationdb/blueprint-v2/foundationdb-blueprint.yaml b/examples/foundationdb/blueprint-v2/foundationdb-blueprint.yaml index 49fe2e4381..59dc56853f 100644 --- a/examples/foundationdb/blueprint-v2/foundationdb-blueprint.yaml +++ b/examples/foundationdb/blueprint-v2/foundationdb-blueprint.yaml @@ -77,7 +77,7 @@ actions: name: deleteBackup args: namespace: "{{ .Namespace.Name }}" - image: ghcr.io/kanisterio/kanister-tools:0.110.0 + image: ghcr.io/kanisterio/kanister-tools:0.111.0 command: - bash - -o diff --git a/examples/k8ssandra/README.md b/examples/k8ssandra/README.md index 0d601406f4..d5a24a1489 100644 --- a/examples/k8ssandra/README.md +++ b/examples/k8ssandra/README.md @@ -8,7 +8,7 @@ K8ssandra operator uses Medusa to backup and restore Cassandra data. Kanister ca * Kubernetes 1.17+ * PV support on the underlying infrastructure -* Kanister controller version 0.110.0 installed in your cluster +* Kanister controller version 0.111.0 installed in your cluster * Kanctl CLI installed (https://docs.kanister.io/tooling.html#kanctl) * K8ssandra needs at least 4 cores and 8GB of RAM available to Docker and appropriate heap sizes for Cassandra and Stargate. If you don’t have those resources available, you can avoid deploying features such as monitoring, Reaper and Medusa, and also reduce the number of Cassandra nodes. diff --git a/examples/kafka/adobe-s3-connector/README.md b/examples/kafka/adobe-s3-connector/README.md index 18e2ac455b..5bdb47cafb 100644 --- a/examples/kafka/adobe-s3-connector/README.md +++ b/examples/kafka/adobe-s3-connector/README.md @@ -6,7 +6,7 @@ During restore, topic messages are purged before the restore operation is perfor ## Prerequisites * Kubernetes 1.9+ -* Kanister controller version 0.110.0 installed in the cluster in a namespace . This example uses `kanister` namespace +* Kanister controller version 0.111.0 installed in the cluster in a namespace . 
This example uses `kanister` namespace * Kanctl CLI installed (https://docs.kanister.io/tooling.html#kanctl) ## Assumption diff --git a/examples/maria/blueprint-v2/maria-blueprint.yaml b/examples/maria/blueprint-v2/maria-blueprint.yaml index 615bf6ccfd..a69769c393 100644 --- a/examples/maria/blueprint-v2/maria-blueprint.yaml +++ b/examples/maria/blueprint-v2/maria-blueprint.yaml @@ -19,7 +19,7 @@ actions: name: '{{ .StatefulSet.Name }}' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .StatefulSet.Namespace }}" command: - bash @@ -53,7 +53,7 @@ actions: name: '{{ .StatefulSet.Name }}' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .StatefulSet.Namespace }}" command: - bash @@ -76,7 +76,7 @@ actions: - func: KubeTask name: deleteFromStore args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/maria/maria-blueprint.yaml b/examples/maria/maria-blueprint.yaml index 5a8ccb0ac3..7e6744ddbb 100644 --- a/examples/maria/maria-blueprint.yaml +++ b/examples/maria/maria-blueprint.yaml @@ -17,7 +17,7 @@ actions: name: '{{ .StatefulSet.Name }}' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .StatefulSet.Namespace }}" command: - bash @@ -49,7 +49,7 @@ actions: name: '{{ .StatefulSet.Name }}' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .StatefulSet.Namespace }}" command: - bash @@ -69,7 +69,7 @@ actions: - func: KubeTask name: deleteFromBlobStore args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/mongo-sidecar/README.md b/examples/mongo-sidecar/README.md index 8c4726b792..775bc57001 100644 --- a/examples/mongo-sidecar/README.md +++ b/examples/mongo-sidecar/README.md @@ -7,7 +7,7 @@ This is an example of using Kanister to backup and restore MongoDB. In this exam - Kubernetes 1.20+ - PV provisioner support in the underlying infrastructure -- Kanister controller version 0.110.0 installed in your cluster, let's assume in Namespace `kanister` +- Kanister controller version 0.111.0 installed in your cluster, let's assume in Namespace `kanister` - Kanctl CLI installed (https://docs.kanister.io/tooling.html#install-the-tools) diff --git a/examples/mongodb-atlas/README.md b/examples/mongodb-atlas/README.md index 234cea037c..e6d25354f3 100644 --- a/examples/mongodb-atlas/README.md +++ b/examples/mongodb-atlas/README.md @@ -7,7 +7,7 @@ It deploys and scales a MongoDB cluster in the cloud. ## Prerequisites * Kubernetes 1.20+ -* Kanister controller version 0.110.0 installed in your cluster +* Kanister controller version 0.111.0 installed in your cluster * Kanctl CLI installed (https://docs.kanister.io/tooling.html#install-the-tools) * Already provisioned MongoDB Atlas cluster (https://www.mongodb.com/docs/atlas/getting-started) @@ -19,7 +19,7 @@ to install. 
```bash $ helm repo add kanister https://charts.kanister.io $ helm install kanister --namespace kanister --create-namespace \ - kanister/kanister-operator --set image.tag=0.110.0 + kanister/kanister-operator --set image.tag=0.111.0 ``` ### Create Blueprint diff --git a/examples/mongodb-deploymentconfig/README.md b/examples/mongodb-deploymentconfig/README.md index 8a0e9ec07f..8a23c1ffd8 100644 --- a/examples/mongodb-deploymentconfig/README.md +++ b/examples/mongodb-deploymentconfig/README.md @@ -14,7 +14,7 @@ cluster's DeploymentConfig resources. - Setup OpenShift, you can follow steps mentioned below - PV provisioner support in the underlying infrastructure -- Kanister controller version 0.110.0 installed in your cluster in namespace `kanister` +- Kanister controller version 0.111.0 installed in your cluster in namespace `kanister` - Kanctl CLI installed (https://docs.kanister.io/tooling.html#kanctl) **Note** diff --git a/examples/mongodb-deploymentconfig/blueprint-v2/mongo-dep-config-blueprint.yaml b/examples/mongodb-deploymentconfig/blueprint-v2/mongo-dep-config-blueprint.yaml index 7da5f628ad..f488eb7408 100644 --- a/examples/mongodb-deploymentconfig/blueprint-v2/mongo-dep-config-blueprint.yaml +++ b/examples/mongodb-deploymentconfig/blueprint-v2/mongo-dep-config-blueprint.yaml @@ -20,7 +20,7 @@ actions: namespace: "{{ .DeploymentConfig.Namespace }}" args: namespace: "{{ .DeploymentConfig.Namespace }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o @@ -50,7 +50,7 @@ actions: namespace: "{{ .DeploymentConfig.Namespace }}" args: namespace: "{{ .DeploymentConfig.Namespace }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o @@ -75,7 +75,7 @@ actions: name: deleteFromStore args: namespace: "{{ .Namespace.Name }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o diff --git a/examples/mongodb-deploymentconfig/mongo-dep-config-blueprint.yaml b/examples/mongodb-deploymentconfig/mongo-dep-config-blueprint.yaml index 596059225e..72c73fe9d6 100644 --- a/examples/mongodb-deploymentconfig/mongo-dep-config-blueprint.yaml +++ b/examples/mongodb-deploymentconfig/mongo-dep-config-blueprint.yaml @@ -18,7 +18,7 @@ actions: namespace: "{{ .DeploymentConfig.Namespace }}" args: namespace: "{{ .DeploymentConfig.Namespace }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o @@ -45,7 +45,7 @@ actions: namespace: "{{ .DeploymentConfig.Namespace }}" args: namespace: "{{ .DeploymentConfig.Namespace }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o @@ -66,7 +66,7 @@ actions: name: deleteFromBlobStore args: namespace: "{{ .Namespace.Name }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o diff --git a/examples/mongodb-restic/README.md b/examples/mongodb-restic/README.md index 06117c4edb..1214227876 100644 --- a/examples/mongodb-restic/README.md +++ b/examples/mongodb-restic/README.md @@ -7,7 +7,7 @@ * Kubernetes 1.9+ * Kubernetes beta APIs enabled only if `podDisruptionBudget` is enabled * PV support on the underlying infrastructure -* Kanister controller version 0.110.0 installed in your cluster +* Kanister controller version 0.111.0 installed in your cluster * Kanctl CLI installed (https://docs.kanister.io/tooling.html#kanctl) ## Chart Details @@ -28,7 
+28,7 @@ $ kubectl create namespace mongo-test $ helm install my-release bitnami/mongodb --namespace mongo-test \ --set architecture="replicaset" \ --set image.repository=ghcr.io/kanisterio/mongodb \ - --set image.tag=0.110.0 + --set image.tag=0.111.0 ``` The command deploys MongoDB on the Kubernetes cluster in the mongo-test namespace diff --git a/examples/mongodb-restic/mongodb-blueprint.yaml b/examples/mongodb-restic/mongodb-blueprint.yaml index 04ae415049..81c36871eb 100644 --- a/examples/mongodb-restic/mongodb-blueprint.yaml +++ b/examples/mongodb-restic/mongodb-blueprint.yaml @@ -39,7 +39,7 @@ actions: name: restorePrimary args: namespace: "{{ .StatefulSet.Namespace }}" - image: ghcr.io/kanisterio/kanister-tools:0.110.0 + image: ghcr.io/kanisterio/kanister-tools:0.111.0 backupArtifactPrefix: "{{ .Profile.Location.Bucket }}/mongodb-backups/{{ .StatefulSet.Name }}/rs_backup" backupInfo: "{{ .ArtifactsIn.backupInfo.KeyValue.backupIdentifier }}" diff --git a/examples/mongodb/README.md b/examples/mongodb/README.md index 2212404006..3407cc99b5 100644 --- a/examples/mongodb/README.md +++ b/examples/mongodb/README.md @@ -7,7 +7,7 @@ * Kubernetes 1.20+ * Kubernetes beta APIs enabled only if `podDisruptionBudget` is enabled * PV support on the underlying infrastructure -* Kanister controller version 0.110.0 installed in your cluster +* Kanister controller version 0.111.0 installed in your cluster * Kanctl CLI installed (https://docs.kanister.io/tooling.html#install-the-tools) ## Chart Details diff --git a/examples/mongodb/blueprint-v2/mongo-blueprint.yaml b/examples/mongodb/blueprint-v2/mongo-blueprint.yaml index 661537d7eb..22fec81c06 100644 --- a/examples/mongodb/blueprint-v2/mongo-blueprint.yaml +++ b/examples/mongodb/blueprint-v2/mongo-blueprint.yaml @@ -20,7 +20,7 @@ actions: namespace: "{{ .StatefulSet.Namespace }}" args: namespace: "{{ .StatefulSet.Namespace }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o @@ -49,7 +49,7 @@ actions: namespace: "{{ .StatefulSet.Namespace }}" args: namespace: "{{ .StatefulSet.Namespace }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o @@ -74,7 +74,7 @@ actions: name: deleteFromStore args: namespace: "{{ .Namespace.Name }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o diff --git a/examples/mongodb/mongo-blueprint.yaml b/examples/mongodb/mongo-blueprint.yaml index 9987214547..bcfe80e38a 100644 --- a/examples/mongodb/mongo-blueprint.yaml +++ b/examples/mongodb/mongo-blueprint.yaml @@ -18,7 +18,7 @@ actions: namespace: "{{ .StatefulSet.Namespace }}" args: namespace: "{{ .StatefulSet.Namespace }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o @@ -44,7 +44,7 @@ actions: namespace: "{{ .StatefulSet.Namespace }}" args: namespace: "{{ .StatefulSet.Namespace }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o @@ -65,7 +65,7 @@ actions: name: deleteFromBlobStore args: namespace: "{{ .Namespace.Name }}" - image: ghcr.io/kanisterio/mongodb:0.110.0 + image: ghcr.io/kanisterio/mongodb:0.111.0 command: - bash - -o diff --git a/examples/mssql/README.md b/examples/mssql/README.md index f52875b6ed..24531d064b 100644 --- a/examples/mssql/README.md +++ b/examples/mssql/README.md @@ -9,7 +9,7 @@ This document will cover how to install SQL Server and how to run 
backup/restore - Kubernetes 1.16+ with Beta APIs enabled - PV provisioner support in the underlying infrastructure -- Kanister controller version 0.110.0 installed in your cluster, let's assume in Namespace `kanister` +- Kanister controller version 0.111.0 installed in your cluster, let's assume in Namespace `kanister` - Kanctl CLI installed (https://docs.kanister.io/tooling.html#install-the-tools) ## Installing Microsoft SQL Server diff --git a/examples/mssql/blueprint-v2/mssql-blueprint.yaml b/examples/mssql/blueprint-v2/mssql-blueprint.yaml index 6da0bb6a77..5f7e069350 100644 --- a/examples/mssql/blueprint-v2/mssql-blueprint.yaml +++ b/examples/mssql/blueprint-v2/mssql-blueprint.yaml @@ -16,7 +16,7 @@ actions: name: '{{ index .Object.metadata.labels "app" }}' namespace: '{{ .Deployment.Namespace }}' args: - image: ghcr.io/kanisterio/mssql-tools:0.110.0 + image: ghcr.io/kanisterio/mssql-tools:0.111.0 command: - bash - -o @@ -47,7 +47,7 @@ actions: name: '{{ index .Object.metadata.labels "app" }}' namespace: '{{ .Deployment.Namespace }}' args: - image: ghcr.io/kanisterio/mssql-tools:0.110.0 + image: ghcr.io/kanisterio/mssql-tools:0.111.0 command: - bash - -o @@ -74,7 +74,7 @@ actions: - func: KubeTask name: deleteFromBlobStore args: - image: ghcr.io/kanisterio/mssql-tools:0.110.0 + image: ghcr.io/kanisterio/mssql-tools:0.111.0 command: - bash - -o diff --git a/examples/mssql/mssql-blueprint.yaml b/examples/mssql/mssql-blueprint.yaml index 4fc1567bf2..05f4729bf9 100644 --- a/examples/mssql/mssql-blueprint.yaml +++ b/examples/mssql/mssql-blueprint.yaml @@ -14,7 +14,7 @@ actions: - func: KubeTask name: dumpToObjectStore args: - image: ghcr.io/kanisterio/mssql-tools:0.110.0 + image: ghcr.io/kanisterio/mssql-tools:0.111.0 command: - bash - -o @@ -45,7 +45,7 @@ actions: - func: KubeTask name: restoreFromObjectStore args: - image: ghcr.io/kanisterio/mssql-tools:0.110.0 + image: ghcr.io/kanisterio/mssql-tools:0.111.0 command: - bash - -o @@ -71,7 +71,7 @@ actions: - func: KubeTask name: deleteFromBlobStore args: - image: ghcr.io/kanisterio/mssql-tools:0.110.0 + image: ghcr.io/kanisterio/mssql-tools:0.111.0 command: - bash - -o diff --git a/examples/mysql-deploymentconfig/README.md b/examples/mysql-deploymentconfig/README.md index b753dd7c70..cc8fc6aa16 100644 --- a/examples/mysql-deploymentconfig/README.md +++ b/examples/mysql-deploymentconfig/README.md @@ -14,7 +14,7 @@ cluster's DeploymentConfig resources. 
- Setup OpenShift, you can follow steps mentioned below - PV provisioner support in the underlying infrastructure -- Kanister controller version 0.110.0 installed in your cluster in namespace `kanister` +- Kanister controller version 0.111.0 installed in your cluster in namespace `kanister` - Kanctl CLI installed (https://docs.kanister.io/tooling.html#kanctl) **Note** diff --git a/examples/mysql-deploymentconfig/blueprint-v2/mysql-dep-config-blueprint.yaml b/examples/mysql-deploymentconfig/blueprint-v2/mysql-dep-config-blueprint.yaml index 846f487750..194e45a184 100644 --- a/examples/mysql-deploymentconfig/blueprint-v2/mysql-dep-config-blueprint.yaml +++ b/examples/mysql-deploymentconfig/blueprint-v2/mysql-dep-config-blueprint.yaml @@ -19,7 +19,7 @@ actions: name: "{{ .DeploymentConfig.Name }}" namespace: "{{ .DeploymentConfig.Namespace }}" args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .DeploymentConfig.Namespace }}" command: - bash @@ -47,7 +47,7 @@ actions: name: "{{ .DeploymentConfig.Name }}" namespace: "{{ .DeploymentConfig.Namespace }}" args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .DeploymentConfig.Namespace }}" command: - bash @@ -71,7 +71,7 @@ actions: - func: KubeTask name: deleteFromStore args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/mysql-deploymentconfig/mysql-dep-config-blueprint.yaml b/examples/mysql-deploymentconfig/mysql-dep-config-blueprint.yaml index 494ce44286..9eb8c8b01d 100644 --- a/examples/mysql-deploymentconfig/mysql-dep-config-blueprint.yaml +++ b/examples/mysql-deploymentconfig/mysql-dep-config-blueprint.yaml @@ -17,7 +17,7 @@ actions: name: "{{ .DeploymentConfig.Name }}" namespace: "{{ .DeploymentConfig.Namespace }}" args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .DeploymentConfig.Namespace }}" command: - bash @@ -43,7 +43,7 @@ actions: name: "{{ .DeploymentConfig.Name }}" namespace: "{{ .DeploymentConfig.Namespace }}" args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .DeploymentConfig.Namespace }}" command: - bash @@ -63,7 +63,7 @@ actions: - func: KubeTask name: deleteFromBlobStore args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/mysql/README.md b/examples/mysql/README.md index a7ae28ccba..d62ca045f2 100755 --- a/examples/mysql/README.md +++ b/examples/mysql/README.md @@ -10,7 +10,7 @@ This chart bootstraps a single node MySQL deployment on a [Kubernetes](http://ku - Kubernetes 1.20+ - PV provisioner support in the underlying infrastructure -- Kanister controller version 0.110.0 installed in your cluster, let's assume in Namespace `kanister` +- Kanister controller version 0.111.0 installed in your cluster, let's assume in Namespace `kanister` - Kanctl CLI installed (https://docs.kanister.io/tooling.html#install-the-tools) ## Installing the Chart diff --git a/examples/mysql/blueprint-v2/mysql-blueprint.yaml b/examples/mysql/blueprint-v2/mysql-blueprint.yaml index 71a3f33a8b..a8a8ecdff4 100644 --- a/examples/mysql/blueprint-v2/mysql-blueprint.yaml +++ b/examples/mysql/blueprint-v2/mysql-blueprint.yaml @@ -19,7 +19,7 
@@ actions: name: '{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .StatefulSet.Namespace }}" command: - bash @@ -47,7 +47,7 @@ actions: name: '{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .StatefulSet.Namespace }}" command: - bash @@ -71,7 +71,7 @@ actions: - func: KubeTask name: deleteFromStore args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/mysql/mysql-blueprint.yaml b/examples/mysql/mysql-blueprint.yaml index a2b130cb74..0b420da985 100644 --- a/examples/mysql/mysql-blueprint.yaml +++ b/examples/mysql/mysql-blueprint.yaml @@ -17,7 +17,7 @@ actions: name: '{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .StatefulSet.Namespace }}" command: - bash @@ -43,7 +43,7 @@ actions: name: '{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .StatefulSet.Namespace }}" command: - bash @@ -63,7 +63,7 @@ actions: - func: KubeTask name: deleteFromBlobStore args: - image: ghcr.io/kanisterio/mysql-sidecar:0.110.0 + image: ghcr.io/kanisterio/mysql-sidecar:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/postgresql-deploymentconfig/README.md b/examples/postgresql-deploymentconfig/README.md index f64beddce3..72a2f5533e 100644 --- a/examples/postgresql-deploymentconfig/README.md +++ b/examples/postgresql-deploymentconfig/README.md @@ -14,7 +14,7 @@ cluster's DeploymentConfig resources. 
- Setup OpenShift, you can follow steps mentioned below - PV provisioner support in the underlying infrastructure -- Kanister controller version 0.110.0 installed in your cluster in namespace `kanister` +- Kanister controller version 0.111.0 installed in your cluster in namespace `kanister` - Kanctl CLI installed (https://docs.kanister.io/tooling.html#kanctl) diff --git a/examples/postgresql-deploymentconfig/blueprint-v2/postgres-dep-config-blueprint.yaml b/examples/postgresql-deploymentconfig/blueprint-v2/postgres-dep-config-blueprint.yaml index 73f890e3a4..bd4e69a29d 100644 --- a/examples/postgresql-deploymentconfig/blueprint-v2/postgres-dep-config-blueprint.yaml +++ b/examples/postgresql-deploymentconfig/blueprint-v2/postgres-dep-config-blueprint.yaml @@ -20,7 +20,7 @@ actions: name: '{{ .DeploymentConfig.Name }}-{{ .DeploymentConfig.Namespace }}' namespace: '{{ .DeploymentConfig.Namespace }}' args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: '{{ .DeploymentConfig.Namespace }}' command: - bash @@ -50,7 +50,7 @@ actions: name: '{{ .DeploymentConfig.Name }}-{{ .DeploymentConfig.Namespace }}' namespace: '{{ .DeploymentConfig.Namespace }}' args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: '{{ .DeploymentConfig.Namespace }}' command: - bash @@ -75,7 +75,7 @@ actions: - func: KubeTask name: deleteDump args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/postgresql-deploymentconfig/postgres-dep-config-blueprint.yaml b/examples/postgresql-deploymentconfig/postgres-dep-config-blueprint.yaml index 9c98985151..e3b9333358 100644 --- a/examples/postgresql-deploymentconfig/postgres-dep-config-blueprint.yaml +++ b/examples/postgresql-deploymentconfig/postgres-dep-config-blueprint.yaml @@ -18,7 +18,7 @@ actions: name: '{{ .DeploymentConfig.Name }}-{{ .DeploymentConfig.Namespace }}' namespace: '{{ .DeploymentConfig.Namespace }}' args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: '{{ .DeploymentConfig.Namespace }}' command: - bash @@ -47,7 +47,7 @@ actions: name: '{{ .DeploymentConfig.Name }}-{{ .DeploymentConfig.Namespace }}' namespace: '{{ .DeploymentConfig.Namespace }}' args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: '{{ .DeploymentConfig.Namespace }}' command: - bash @@ -69,7 +69,7 @@ actions: - func: KubeTask name: deleteDump args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/postgresql-ha/hook-blueprint/README.md b/examples/postgresql-ha/hook-blueprint/README.md index f5b7dd82f3..83f813431d 100644 --- a/examples/postgresql-ha/hook-blueprint/README.md +++ b/examples/postgresql-ha/hook-blueprint/README.md @@ -20,7 +20,7 @@ This blueprint is only required when you face above mentioned issue, else you wi - Kubernetes 1.10+ - PV provisioner support in the underlying infrastructure -- Kanister controller version 0.110.0 installed in your cluster +- Kanister controller version 0.111.0 installed in your cluster - Kanctl CLI installed 
(https://docs.kanister.io/tooling.html#kanctl) ## Installing the Chart diff --git a/examples/postgresql-ha/hook-blueprint/postgres-ha-hook.yaml b/examples/postgresql-ha/hook-blueprint/postgres-ha-hook.yaml index 7d781e6e43..5bd5508aa4 100644 --- a/examples/postgresql-ha/hook-blueprint/postgres-ha-hook.yaml +++ b/examples/postgresql-ha/hook-blueprint/postgres-ha-hook.yaml @@ -26,7 +26,7 @@ actions: namespace: '{{ .StatefulSet.Namespace }}' args: namespace: '{{ .StatefulSet.Namespace }}' - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 command: - bash - -o diff --git a/examples/postgresql-wale/README.md b/examples/postgresql-wale/README.md index b780bccec1..5a6914f9cf 100755 --- a/examples/postgresql-wale/README.md +++ b/examples/postgresql-wale/README.md @@ -12,7 +12,7 @@ Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment - Kubernetes 1.10+ - PV provisioner support in the underlying infrastructure -- Kanister controller version 0.110.0 installed in your cluster +- Kanister controller version 0.111.0 installed in your cluster - Kanctl CLI installed (https://docs.kanister.io/tooling.html#kanctl) ## Installing the Chart @@ -25,7 +25,7 @@ $ helm repo update $ helm install my-release bitnami/postgresql \ --namespace postgres-test --create-namespace \ --set image.repository=ghcr.io/kanisterio/postgresql \ - --set image.tag=0.110.0 \ + --set image.tag=0.111.0 \ --set postgresqlPassword=postgres-12345 \ --set postgresqlExtendedConf.archiveCommand="'envdir /bitnami/postgresql/data/env wal-e wal-push %p'" \ --set postgresqlExtendedConf.archiveMode=true \ @@ -41,7 +41,7 @@ In case, if you don't have `Kanister` installed already, you can use following c Add Kanister Helm repository and install Kanister operator ```bash $ helm repo add kanister https://charts.kanister.io -$ helm install kanister --namespace kanister --create-namespace kanister/kanister-operator --set image.tag=0.110.0 +$ helm install kanister --namespace kanister --create-namespace kanister/kanister-operator --set image.tag=0.111.0 ``` ## Integrating with Kanister diff --git a/examples/postgresql-wale/postgresql-blueprint.yaml b/examples/postgresql-wale/postgresql-blueprint.yaml index b4ebd4a71e..2fb2fd3e5c 100644 --- a/examples/postgresql-wale/postgresql-blueprint.yaml +++ b/examples/postgresql-wale/postgresql-blueprint.yaml @@ -132,7 +132,7 @@ actions: - func: PrepareData name: performRestore args: - image: "ghcr.io/kanisterio/postgresql:0.110.0" + image: "ghcr.io/kanisterio/postgresql:0.111.0" namespace: "{{ .StatefulSet.Namespace }}" volumes: "data-{{ .StatefulSet.Name }}-0": "/bitnami/postgresql" @@ -282,7 +282,7 @@ actions: name: deleteArtifact args: namespace: "{{ .Namespace.Name }}" - image: "ghcr.io/kanisterio/postgresql:0.110.0" + image: "ghcr.io/kanisterio/postgresql:0.111.0" command: - bash - -o diff --git a/examples/postgresql/README.md b/examples/postgresql/README.md index c3016bb2af..dcf67205f2 100755 --- a/examples/postgresql/README.md +++ b/examples/postgresql/README.md @@ -12,7 +12,7 @@ Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment - Kubernetes 1.20+ - PV provisioner support in the underlying infrastructure -- Kanister controller version 0.110.0 installed in your cluster +- Kanister controller version 0.111.0 installed in your cluster - Kanctl CLI installed (https://docs.kanister.io/tooling.html#install-the-tools) ## Installing the Chart @@ -34,7 +34,7 @@ In case, if you don't have 
`Kanister` installed already, you can use following c Add Kanister Helm repository and install Kanister operator ```bash $ helm repo add kanister https://charts.kanister.io -$ helm install kanister --namespace kanister --create-namespace kanister/kanister-operator --set image.tag=0.110.0 +$ helm install kanister --namespace kanister --create-namespace kanister/kanister-operator --set image.tag=0.111.0 ``` ## Integrating with Kanister diff --git a/examples/postgresql/blueprint-v2/postgres-blueprint.yaml b/examples/postgresql/blueprint-v2/postgres-blueprint.yaml index 801d26dd1b..32e3a0dc1d 100644 --- a/examples/postgresql/blueprint-v2/postgres-blueprint.yaml +++ b/examples/postgresql/blueprint-v2/postgres-blueprint.yaml @@ -20,7 +20,7 @@ actions: name: '{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}-postgresql' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: '{{ .StatefulSet.Namespace }}' command: - bash @@ -50,7 +50,7 @@ actions: name: '{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}-postgresql' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: '{{ .StatefulSet.Namespace }}' command: - bash @@ -75,7 +75,7 @@ actions: - func: KubeTask name: deleteDump args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/postgresql/postgres-blueprint.yaml b/examples/postgresql/postgres-blueprint.yaml index 410fd3057c..65134cb243 100644 --- a/examples/postgresql/postgres-blueprint.yaml +++ b/examples/postgresql/postgres-blueprint.yaml @@ -18,7 +18,7 @@ actions: name: '{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}-postgresql' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: '{{ .StatefulSet.Namespace }}' command: - bash @@ -47,7 +47,7 @@ actions: name: '{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}-postgresql' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: '{{ .StatefulSet.Namespace }}' command: - bash @@ -69,7 +69,7 @@ actions: - func: KubeTask name: deleteDump args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/postgresql/v10.16.2/postgres-blueprint.yaml b/examples/postgresql/v10.16.2/postgres-blueprint.yaml index 4b8e894c5b..6aa9d311a1 100644 --- a/examples/postgresql/v10.16.2/postgres-blueprint.yaml +++ b/examples/postgresql/v10.16.2/postgres-blueprint.yaml @@ -18,7 +18,7 @@ actions: name: '{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}-postgresql' namespace: '{{ .StatefulSet.Namespace }}' args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: '{{ .StatefulSet.Namespace }}' command: - bash @@ -47,7 +47,7 @@ actions: name: '{{ index .Object.metadata.labels "app.kubernetes.io/instance" }}-postgresql' namespace: '{{ .StatefulSet.Namespace 
}}' args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: '{{ .StatefulSet.Namespace }}' command: - bash @@ -69,7 +69,7 @@ actions: - func: KubeTask name: deleteDump args: - image: ghcr.io/kanisterio/postgres-kanister-tools:0.110.0 + image: ghcr.io/kanisterio/postgres-kanister-tools:0.111.0 namespace: "{{ .Namespace.Name }}" command: - bash diff --git a/examples/redis/README.md b/examples/redis/README.md index 84f5154270..ae26ee1a69 100644 --- a/examples/redis/README.md +++ b/examples/redis/README.md @@ -11,7 +11,7 @@ We will be using [Redis](https://github.com/bitnami/charts/tree/main/bitnami/red - Kubernetes 1.20+ - PV provisioner support in the underlying infrastructure -- Kanister controller version 0.110.0 installed in your cluster, let's assume in Namespace `kanister` +- Kanister controller version 0.111.0 installed in your cluster, let's assume in Namespace `kanister` - Kanctl CLI installed (https://docs.kanister.io/tooling.html#install-the-tools) - Docker CLI installed - A docker image containing the required tools to back up Redis. The Dockerfile for the image can be found [here](https://raw.githubusercontent.com/kanisterio/kanister/master/docker/redis-tools/Dockerfile). To build and push the docker image to your docker registry, execute [these](#build-docker-image) steps. diff --git a/examples/time-log/blueprint.yaml b/examples/time-log/blueprint.yaml index 9f54a36792..526ddd2404 100644 --- a/examples/time-log/blueprint.yaml +++ b/examples/time-log/blueprint.yaml @@ -38,7 +38,7 @@ actions: args: namespace: "{{ .Deployment.Namespace }}" pod: "{{ index .Deployment.Pods 0 }}" - image: ghcr.io/kanisterio/kanister-tools:0.110.0 + image: ghcr.io/kanisterio/kanister-tools:0.111.0 backupArtifactPrefix: "{{ .ArtifactsIn.timeLog.KeyValue.path }}" backupIdentifier: "{{ .ArtifactsIn.backupIdentifier.KeyValue.id }}" - func: ScaleWorkload diff --git a/examples/time-log/time-logger-deployment.yaml b/examples/time-log/time-logger-deployment.yaml index 0c184145c8..2f06e97811 100644 --- a/examples/time-log/time-logger-deployment.yaml +++ b/examples/time-log/time-logger-deployment.yaml @@ -27,7 +27,7 @@ spec: spec: containers: - name: test-container - image: ghcr.io/kanisterio/kanister-tools:0.110.0 + image: ghcr.io/kanisterio/kanister-tools:0.111.0 command: ["sh", "-c"] args: ["while true; do for x in $(seq 1200); do date >> /var/log/time.log; sleep 1; done; truncate /var/log/time.log --size 0; done"] volumeMounts: diff --git a/go.mod b/go.mod index 3d1cdb073c..c8a4e0de0f 100644 --- a/go.mod +++ b/go.mod @@ -45,25 +45,25 @@ require ( github.com/openshift/api v0.0.0-20231222123017-053aee22b4b4 github.com/openshift/client-go v0.0.0-20231221125933-2aa81c72f992 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.20.3 + github.com/prometheus/client_golang v1.20.4 github.com/prometheus/client_model v0.6.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.8.1 go.uber.org/zap v1.27.0 golang.org/x/oauth2 v0.23.0 gonum.org/v1/gonum v0.15.1 - google.golang.org/api v0.196.0 + google.golang.org/api v0.198.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 //pinned k8s.io to v0.29.x tag - k8s.io/api v0.29.8 - k8s.io/apiextensions-apiserver v0.29.8 - k8s.io/apimachinery v0.29.8 - k8s.io/cli-runtime v0.29.8 - k8s.io/client-go v0.29.8 - k8s.io/code-generator v0.29.8 - k8s.io/kubectl v0.29.8 + k8s.io/api v0.29.9 + k8s.io/apiextensions-apiserver 
v0.29.9 + k8s.io/apimachinery v0.29.9 + k8s.io/cli-runtime v0.29.9 + k8s.io/client-go v0.29.9 + k8s.io/code-generator v0.29.9 + k8s.io/kubectl v0.29.9 sigs.k8s.io/controller-runtime v0.16.6 sigs.k8s.io/yaml v1.3.0 @@ -71,9 +71,9 @@ require ( require ( cloud.google.com/go v0.115.1 // indirect - cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth v0.9.4 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/compute/metadata v0.5.1 // indirect cloud.google.com/go/iam v1.2.0 // indirect cloud.google.com/go/storage v1.43.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect @@ -136,7 +136,7 @@ require ( github.com/google/readahead v0.0.0-20161222183148-eaceba169032 // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.3 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect @@ -206,28 +206,28 @@ require ( go.opentelemetry.io/proto/otlp v1.2.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.26.0 // indirect + golang.org/x/crypto v0.27.0 // indirect golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/grpc v1.66.0 // indirect + google.golang.org/grpc v1.66.2 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/kothar/go-backblaze.v0 v0.0.0-20210124194846-35409b867216 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.29.8 // indirect + k8s.io/component-base v0.29.9 // indirect k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 diff --git a/go.sum b/go.sum index 5757040331..5bc62f8675 100644 --- a/go.sum +++ b/go.sum @@ -8,13 +8,13 @@ cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= -cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= -cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= 
+cloud.google.com/go/auth v0.9.4 h1:DxF7imbEbiFu9+zdKC6cKBko1e8XeJnipNqIbWZ+kDI= +cloud.google.com/go/auth v0.9.4/go.mod h1:SHia8n6//Ya940F1rLimhJCjjx7KE17t0ctFEci3HkA= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.1 h1:NM6oZeZNlYjiwYje+sYFjEpP0Q0zCan1bmQW/KmIrGs= +cloud.google.com/go/compute/metadata v0.5.1/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= @@ -317,8 +317,8 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.3 h1:QRje2j5GZimBzlbhGA2V2QlGNgL8G6e+wGo/+/2bWI0= -github.com/googleapis/enterprise-certificate-proxy v0.3.3/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= @@ -494,8 +494,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7 h1:xoIK0ctDddBMnc74udxJYBqlo9Ylnsp1waqjLsnef20= github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= -github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= -github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= @@ -604,8 +604,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod 
h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -685,14 +685,14 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -701,8 +701,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -753,8 +753,8 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= 
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.196.0 h1:k/RafYqebaIJBO3+SMnfEGtFVlvp5vSgqTUF54UN/zg= -google.golang.org/api v0.196.0/go.mod h1:g9IL21uGkYgvQ5BZg6BAtoGJQIm8r6EgaAbpNey5wBE= +google.golang.org/api v0.198.0 h1:OOH5fZatk57iN0A7tjJQzt6aPfYQ1JiWkt1yGseazks= +google.golang.org/api v0.198.0/go.mod h1:/Lblzl3/Xqqk9hw/yS97TImKTUwnf1bv89v7+OagJzc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -784,8 +784,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -824,23 +824,23 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= -k8s.io/api v0.29.8 h1:ZBKg9clWnIGtQ5yGhNwMw2zyyrsIAQaXhZACcYNflQE= -k8s.io/api v0.29.8/go.mod h1:XlGIpmpzKGrtVca7GlgNryZJ19SvQdI808NN7fy1SgQ= -k8s.io/apiextensions-apiserver v0.29.8 h1:VkyGgClTTWs8i81O13wsTLSs9Q1PWVr0L880F2GjwUI= -k8s.io/apiextensions-apiserver v0.29.8/go.mod h1:e6dPglIfPWm9ydsXuNqefecEVDH0uLfzClJEupSk2VU= +k8s.io/api v0.29.9 h1:FwdflpNsfMUYUOblMZNWJ4K/q0OSL5A4jGa0iOgcJco= +k8s.io/api v0.29.9/go.mod h1:fNhmzRfKaSEHCmczA/jRx6CiDKhYOnFLJBERMJAXEk8= +k8s.io/apiextensions-apiserver v0.29.9 h1:EB6RK06kFJjbzBwU1YiVznxrcgBE0hhDWt6EQQIcOy4= +k8s.io/apiextensions-apiserver v0.29.9/go.mod h1:jcaHG6R/bB1iU6XzC1DMhB1x2ktTJLt2KKpg6B65Z2c= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.29.8 h1:uBHc9WuKiTHClIspJqtR84WNpG0aOGn45HWqxgXkk8Y= -k8s.io/apimachinery v0.29.8/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= -k8s.io/cli-runtime v0.29.8 h1:kVErAPf1v7MOwNO6rBYnf2i4kQ2668Y9pHGO5C1/wSo= -k8s.io/cli-runtime v0.29.8/go.mod h1:c00Fk85K05DtEknMAi1t7ao1MR4nmQ9YlvC+QluvNoY= +k8s.io/apimachinery v0.29.9 h1:YZ8HUid1TzQVz94cnNlsQjLdH0VoAhWSqz7t0q6B12A= +k8s.io/apimachinery v0.29.9/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/cli-runtime v0.29.9 h1:rryHH2SZtZePv0gj9RM5ftXYcK8v2jbMLOzbORnzzw4= +k8s.io/cli-runtime v0.29.9/go.mod 
h1:IHgU0jdyAOcrfkpvaDXZRqPe+RJYlUgbufl88Z6EUyo= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= -k8s.io/client-go v0.29.8 h1:QMRKcIzqE/qawknXcsi51GdIAYN8UP39S/M5KnFu/J0= -k8s.io/client-go v0.29.8/go.mod h1:ZzrAAVrqO2jVXMb8My/jTke8n0a/mIynnA3y/1y1UB0= +k8s.io/client-go v0.29.9 h1:4f/Wz6li3rEyIPFj32XAQMtOGMM1tg7KQi1oeS6ibPg= +k8s.io/client-go v0.29.9/go.mod h1:2N1drQEZ5yiYrWVaE2Un8JiISUhl47D8pyZlYLszke4= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/code-generator v0.29.8 h1:RQ6kM269HlPvmX4BDyjwPwCzsV6Gqa+rtmE96t9csbg= -k8s.io/code-generator v0.29.8/go.mod h1:7TYnI0dYItL2cKuhhgPSuF3WED9uMdELgbVXFfn/joE= -k8s.io/component-base v0.29.8 h1:4LJ94/eOJpDFZFbGbRH4CEyk29a7PZr8noVe9tBJUUY= -k8s.io/component-base v0.29.8/go.mod h1:FYOQSsKgh9/+FNleq8m6cXH2Cq8fNiUnJzDROowLaqU= +k8s.io/code-generator v0.29.9 h1:57k53ZbD4W4NFlTV2iH7nKfmoLP4Q6yW2o2H2nyZpF0= +k8s.io/code-generator v0.29.9/go.mod h1:7TYnI0dYItL2cKuhhgPSuF3WED9uMdELgbVXFfn/joE= +k8s.io/component-base v0.29.9 h1:lPENvp3CCwdeMEWGjiTfn5b287qQYuK7gX32OBOovmA= +k8s.io/component-base v0.29.9/go.mod h1:NGDa6Ih0EdcLA2G4K2ZYySoiB+2Tn+rmSqPyudCPgDY= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= @@ -852,8 +852,8 @@ k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/kubectl v0.29.8 h1:JuMEjC+dCSWwF3Z4L22MFE7TvdOZn5bZ/qcNkQUz9J0= -k8s.io/kubectl v0.29.8/go.mod h1:x9o1S8AqTk+/C+e8EaRRhQLw5w0Jadd+4S/vsEAVA4c= +k8s.io/kubectl v0.29.9 h1:8DJIPkRk5a6WonxRAbicJIn0DNwacOxaLxO4EDwf/hc= +k8s.io/kubectl v0.29.9/go.mod h1:ylJbHUuPTYiwxAKx97nAyU3TKh3vHlEW+Pp44Usolvw= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= diff --git a/helm/kanister-operator/Chart.yaml b/helm/kanister-operator/Chart.yaml index e1cf950cc6..2a6ce179b0 100644 --- a/helm/kanister-operator/Chart.yaml +++ b/helm/kanister-operator/Chart.yaml @@ -9,5 +9,5 @@ maintainers: - email: tom@kasten.io name: tdmanv icon: https://kasten.io/assets/img/kanister-logo.png -appVersion: 0.110.0 +appVersion: 0.111.0 source: https://github.com/kanisterio/kanister diff --git a/helm/kanister-operator/templates/rbac.yaml b/helm/kanister-operator/templates/rbac.yaml index b9007e754c..f6f61b2e57 100644 --- a/helm/kanister-operator/templates/rbac.yaml +++ b/helm/kanister-operator/templates/rbac.yaml @@ -39,21 +39,6 @@ rules: --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: -{{ include "kanister-operator.helmLabels" . | indent 4 }} - name: {{ template "kanister-operator.fullname" . 
}}-edit-role -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: edit -subjects: -- kind: ServiceAccount - name: {{ template "kanister-operator.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: {{ include "kanister-operator.helmLabels" . | indent 4 }} diff --git a/helm/kanister-operator/values.yaml b/helm/kanister-operator/values.yaml index 6de036f300..e6967d08b2 100644 --- a/helm/kanister-operator/values.yaml +++ b/helm/kanister-operator/values.yaml @@ -3,17 +3,17 @@ # Declare variables to be passed into your templates. image: repository: ghcr.io/kanisterio/controller - tag: 0.110.0 + tag: 0.111.0 pullPolicy: IfNotPresent repositoryServerControllerImage: registry: ghcr.io/kanisterio name: repo-server-controller - tag: 0.110.0 + tag: 0.111.0 pullPolicy: IfNotPresent kanisterToolsImage: override: false image: ghcr.io/kanisterio/kanister-tools - tag: 0.110.0 + tag: 0.111.0 rbac: create: true serviceAccount: diff --git a/helm/profile/Chart.yaml b/helm/profile/Chart.yaml index a26f5ce6e4..3bf7b28d2d 100644 --- a/helm/profile/Chart.yaml +++ b/helm/profile/Chart.yaml @@ -3,7 +3,7 @@ description: A helm chart to create profile custom resource for kanister engine: gotpl name: profile home: https://kanister.io/ -version: 0.110.0 +version: 0.111.0 maintainers: - email: tom@kasten.io name: tdmanv diff --git a/pkg/apis/cr/v1alpha1/repositoryserver_types_test.go b/pkg/apis/cr/v1alpha1/repositoryserver_types_test.go index 5bbb2203ce..0e051742f1 100644 --- a/pkg/apis/cr/v1alpha1/repositoryserver_types_test.go +++ b/pkg/apis/cr/v1alpha1/repositoryserver_types_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/pkg/errors" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" ) @@ -58,28 +58,28 @@ spec: username: test-kanister-user ` -func TestRepositoryServer(t *testing.T) { TestingT(t) } +func TestRepositoryServer(t *testing.T) { check.TestingT(t) } -func (s *TypesSuite) TestRepositoryServerDecode(c *C) { +func (s *TypesSuite) TestRepositoryServerDecode(c *check.C) { rs, err := getRepositoryServerFromSpec([]byte(repoServerSpec)) - c.Assert(err, IsNil) - c.Assert(rs, NotNil) - c.Assert(rs.Spec.Storage.SecretRef.Name, Equals, "test-s3-location") - c.Assert(rs.Spec.Storage.SecretRef.Namespace, Equals, "kanister") - c.Assert(rs.Spec.Storage.CredentialSecretRef.Name, Equals, "test-s3-creds") - c.Assert(rs.Spec.Storage.CredentialSecretRef.Namespace, Equals, "kanister") - c.Assert(rs.Spec.Repository.RootPath, Equals, "/test-repo-controller/") - c.Assert(rs.Spec.Repository.PasswordSecretRef.Name, Equals, "test-repo-pass") - c.Assert(rs.Spec.Repository.PasswordSecretRef.Namespace, Equals, "kanister") - c.Assert(rs.Spec.Repository.Username, Equals, "test-repository-user") - c.Assert(rs.Spec.Repository.Hostname, Equals, "localhost") - c.Assert(rs.Spec.Server.AdminSecretRef.Name, Equals, "test-repository-admin-user") - c.Assert(rs.Spec.Server.AdminSecretRef.Namespace, Equals, "kanister") - c.Assert(rs.Spec.Server.TLSSecretRef.Name, Equals, "test-repository-server-tls-cert") - c.Assert(rs.Spec.Server.TLSSecretRef.Namespace, Equals, "kanister") - c.Assert(rs.Spec.Server.UserAccess.UserAccessSecretRef.Name, Equals, "test-repository-server-user-access") - c.Assert(rs.Spec.Server.UserAccess.UserAccessSecretRef.Namespace, Equals, "kanister") - c.Assert(rs.Spec.Server.UserAccess.Username, Equals, "test-kanister-user") + 
c.Assert(err, check.IsNil) + c.Assert(rs, check.NotNil) + c.Assert(rs.Spec.Storage.SecretRef.Name, check.Equals, "test-s3-location") + c.Assert(rs.Spec.Storage.SecretRef.Namespace, check.Equals, "kanister") + c.Assert(rs.Spec.Storage.CredentialSecretRef.Name, check.Equals, "test-s3-creds") + c.Assert(rs.Spec.Storage.CredentialSecretRef.Namespace, check.Equals, "kanister") + c.Assert(rs.Spec.Repository.RootPath, check.Equals, "/test-repo-controller/") + c.Assert(rs.Spec.Repository.PasswordSecretRef.Name, check.Equals, "test-repo-pass") + c.Assert(rs.Spec.Repository.PasswordSecretRef.Namespace, check.Equals, "kanister") + c.Assert(rs.Spec.Repository.Username, check.Equals, "test-repository-user") + c.Assert(rs.Spec.Repository.Hostname, check.Equals, "localhost") + c.Assert(rs.Spec.Server.AdminSecretRef.Name, check.Equals, "test-repository-admin-user") + c.Assert(rs.Spec.Server.AdminSecretRef.Namespace, check.Equals, "kanister") + c.Assert(rs.Spec.Server.TLSSecretRef.Name, check.Equals, "test-repository-server-tls-cert") + c.Assert(rs.Spec.Server.TLSSecretRef.Namespace, check.Equals, "kanister") + c.Assert(rs.Spec.Server.UserAccess.UserAccessSecretRef.Name, check.Equals, "test-repository-server-user-access") + c.Assert(rs.Spec.Server.UserAccess.UserAccessSecretRef.Namespace, check.Equals, "kanister") + c.Assert(rs.Spec.Server.UserAccess.Username, check.Equals, "test-kanister-user") } func getRepositoryServerFromSpec(spec []byte) (*RepositoryServer, error) { diff --git a/pkg/apis/cr/v1alpha1/types_test.go b/pkg/apis/cr/v1alpha1/types_test.go index 31daba9752..6da1bc3d7d 100644 --- a/pkg/apis/cr/v1alpha1/types_test.go +++ b/pkg/apis/cr/v1alpha1/types_test.go @@ -19,16 +19,16 @@ import ( "testing" "github.com/pkg/errors" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type TypesSuite struct{} -var _ = Suite(&TypesSuite{}) +var _ = check.Suite(&TypesSuite{}) const bpSpec = ` actions: @@ -50,7 +50,7 @@ actions: foo: bar ` -func (s *TypesSuite) TestBlueprintDecode(c *C) { +func (s *TypesSuite) TestBlueprintDecode(c *check.C) { expected := map[string]reflect.Kind{ "testint": reflect.Int64, "teststring": reflect.String, @@ -59,12 +59,12 @@ func (s *TypesSuite) TestBlueprintDecode(c *C) { } bp, err := getBlueprintFromSpec([]byte(bpSpec)) - c.Assert(err, IsNil) - c.Assert(bp.Actions["echo"].Phases[0].Args, HasLen, len(expected)) + c.Assert(err, check.IsNil) + c.Assert(bp.Actions["echo"].Phases[0].Args, check.HasLen, len(expected)) for n, evk := range expected { v := bp.Actions["echo"].Phases[0].Args[n] - c.Check(v, Not(Equals), nil) - c.Check(reflect.TypeOf(v).Kind(), Equals, evk) + c.Check(v, check.Not(check.Equals), nil) + c.Check(reflect.TypeOf(v).Kind(), check.Equals, evk) } } diff --git a/pkg/app/bp_test.go b/pkg/app/bp_test.go index eabd96b080..108af79f02 100644 --- a/pkg/app/bp_test.go +++ b/pkg/app/bp_test.go @@ -19,7 +19,7 @@ import ( "strings" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -27,13 +27,13 @@ import ( ) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type BlueprintSuite struct{} -var _ = Suite(&BlueprintSuite{}) +var _ = check.Suite(&BlueprintSuite{}) -func (bs *BlueprintSuite) TestUpdateImageTags(c *C) { +func (bs *BlueprintSuite) TestUpdateImageTags(c *check.C) { for _, bp := range []*crv1alpha1.Blueprint{ // BP with no phase with image arg { @@ -138,7 +138,7 @@ func (bs *BlueprintSuite) TestUpdateImageTags(c *C) { } } -func validateImageTags(c *C, bp *crv1alpha1.Blueprint) { +func validateImageTags(c *check.C, bp *crv1alpha1.Blueprint) { podOverride := crv1alpha1.JSONMap{ "containers": []map[string]interface{}{ { @@ -156,9 +156,9 @@ func validateImageTags(c *C, bp *crv1alpha1.Blueprint) { // Verify if image with prefix "ghcr.io/kanisterio" is tagged "v9.99.9-dev" c.Log(fmt.Sprintf("phase:%s, image:%s", phase.Name, image.(string))) if strings.HasPrefix(image.(string), imagePrefix) { - c.Assert(strings.Split(image.(string), ":")[1], Equals, "v9.99.9-dev") + c.Assert(strings.Split(image.(string), ":")[1], check.Equals, "v9.99.9-dev") } - c.Assert(phase.Args["podOverride"], DeepEquals, podOverride) + c.Assert(phase.Args["podOverride"], check.DeepEquals, podOverride) } } } diff --git a/pkg/app/csi-snapshot.go b/pkg/app/csi-snapshot.go index 3c2c6a601f..a95892b4ab 100644 --- a/pkg/app/csi-snapshot.go +++ b/pkg/app/csi-snapshot.go @@ -192,7 +192,7 @@ func (tlc TimeLogCSI) getAppDeploymentObj() *appsv1.Deployment { Containers: []corev1.Container{ { Name: "test-container", - Image: "ghcr.io/kanisterio/kanister-tools:0.110.0", + Image: "ghcr.io/kanisterio/kanister-tools:0.111.0", Command: []string{"sh", "-c"}, Args: []string{"while true; do for x in $(seq 1200); do date >> /var/log/time.log; sleep 1; done; truncate /var/log/time.log --size 0; done"}, VolumeMounts: []corev1.VolumeMount{ diff --git a/pkg/blockstorage/awsebs/awsebs_test.go b/pkg/blockstorage/awsebs/awsebs_test.go index 4f81adca58..ff4794f86f 100644 --- a/pkg/blockstorage/awsebs/awsebs_test.go +++ b/pkg/blockstorage/awsebs/awsebs_test.go @@ -22,7 +22,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" kaws "github.com/kanisterio/kanister/pkg/aws" "github.com/kanisterio/kanister/pkg/blockstorage" @@ -30,13 +30,13 @@ import ( ) // Hook up gocheck into the "go test" runner. 
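
> Note (not part of the patch): the dot-import cleanups in the test files above and below all follow the same pattern, summarized here as a minimal, self-contained sketch with hypothetical package and suite names. It assumes `gopkg.in/check.v1` is already a module dependency; only the import style changes, the assertions themselves stay the same. Qualifying every identifier with `check.` removes the dot import, so it is immediately clear which helpers come from gocheck and which from the standard library. The second test method mirrors the table-driven style used further down in the Azure client tests, where each case carries the `check.Checker` it expects.

```go
// Hypothetical example_test.go illustrating the named-import gocheck style.
package example_test

import (
	"errors"
	"testing"

	"gopkg.in/check.v1" // package name is "check"; no dot import
)

// Hook gocheck into the standard "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }

type ExampleSuite struct{}

// Register the suite with gocheck.
var _ = check.Suite(&ExampleSuite{})

func (s *ExampleSuite) TestAdd(c *check.C) {
	sum := 1 + 1
	c.Assert(sum, check.Equals, 2)
	c.Check([]int{sum}, check.DeepEquals, []int{2})
}

func (s *ExampleSuite) TestTableDriven(c *check.C) {
	// Each case carries the checker it expects (check.IsNil or check.NotNil),
	// so one loop covers both the success and the failure paths.
	for _, tc := range []struct {
		in         int
		errChecker check.Checker
	}{
		{in: 1, errChecker: check.IsNil},
		{in: -1, errChecker: check.NotNil},
	} {
		var err error
		if tc.in < 0 {
			err = errors.New("negative input")
		}
		c.Assert(err, tc.errChecker)
	}
}
```
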
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type AWSEBSSuite struct{} -var _ = Suite(&AWSEBSSuite{}) +var _ = check.Suite(&AWSEBSSuite{}) -func (s AWSEBSSuite) TestVolumeParse(c *C) { +func (s AWSEBSSuite) TestVolumeParse(c *check.C) { expected := blockstorage.Volume{ Az: "the-availability-zone", CreationTime: blockstorage.TimeStamp(time.Date(2008, 8, 21, 5, 50, 0, 0, time.UTC)), @@ -71,20 +71,20 @@ func (s AWSEBSSuite) TestVolumeParse(c *C) { VolumeType: aws.String("the-volume-type"), }) - c.Assert(volume, Not(IsNil)) - c.Check(volume.Az, Equals, expected.Az) - c.Check(volume.CreationTime, Equals, expected.CreationTime) - c.Check(volume.Encrypted, Equals, expected.Encrypted) - c.Check(volume.ID, Equals, expected.ID) - c.Check(volume.Iops, Equals, expected.Iops) - c.Check(volume.SizeInBytes, Equals, expected.SizeInBytes) - c.Check(volume.Tags, DeepEquals, expected.Tags) - c.Check(volume.Type, Equals, blockstorage.TypeEBS) - c.Check(volume.VolumeType, Equals, expected.VolumeType) - c.Check(volume.Attributes, DeepEquals, expected.Attributes) + c.Assert(volume, check.Not(check.IsNil)) + c.Check(volume.Az, check.Equals, expected.Az) + c.Check(volume.CreationTime, check.Equals, expected.CreationTime) + c.Check(volume.Encrypted, check.Equals, expected.Encrypted) + c.Check(volume.ID, check.Equals, expected.ID) + c.Check(volume.Iops, check.Equals, expected.Iops) + c.Check(volume.SizeInBytes, check.Equals, expected.SizeInBytes) + c.Check(volume.Tags, check.DeepEquals, expected.Tags) + c.Check(volume.Type, check.Equals, blockstorage.TypeEBS) + c.Check(volume.VolumeType, check.Equals, expected.VolumeType) + c.Check(volume.Attributes, check.DeepEquals, expected.Attributes) } -func (s AWSEBSSuite) TestGetRegions(c *C) { +func (s AWSEBSSuite) TestGetRegions(c *check.C) { ctx := context.Background() config := map[string]string{} @@ -94,18 +94,18 @@ func (s AWSEBSSuite) TestGetRegions(c *C) { // create provider with region config[kaws.ConfigRegion] = "us-west-2" bsp, err := NewProvider(ctx, config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ebsp := bsp.(*EbsStorage) // get zones with other region zones, err := ebsp.FromRegion(ctx, "us-east-1") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, zone := range zones { - c.Assert(strings.Contains(zone, "us-east-1"), Equals, true) - c.Assert(strings.Contains(zone, "us-west-2"), Equals, false) + c.Assert(strings.Contains(zone, "us-east-1"), check.Equals, true) + c.Assert(strings.Contains(zone, "us-west-2"), check.Equals, false) } regions, err := ebsp.GetRegions(ctx) - c.Assert(err, IsNil) - c.Assert(regions, NotNil) + c.Assert(err, check.IsNil) + c.Assert(regions, check.NotNil) } diff --git a/pkg/blockstorage/awsebs/zone_test.go b/pkg/blockstorage/awsebs/zone_test.go index b92a37c55e..23f20f74e5 100644 --- a/pkg/blockstorage/awsebs/zone_test.go +++ b/pkg/blockstorage/awsebs/zone_test.go @@ -15,16 +15,16 @@ package awsebs import ( - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/blockstorage/zone" ) type ZoneSuite struct{} -var _ = Suite(&ZoneSuite{}) +var _ = check.Suite(&ZoneSuite{}) -func (s ZoneSuite) TestZoneWithUnknownNodeZones(c *C) { +func (s ZoneSuite) TestZoneWithUnknownNodeZones(c *check.C) { defaultZones := []string{"us-west-2a", "us-west-2b", "us-west-2c"} for _, tc := range []struct { zones []string @@ -48,6 +48,6 @@ func (s ZoneSuite) TestZoneWithUnknownNodeZones(c *C) { }, } { z := zone.SanitizeAvailableZones(tc.in, tc.zones) - c.Assert(z, DeepEquals, tc.out) + c.Assert(z, check.DeepEquals, tc.out) } } diff --git a/pkg/blockstorage/azure/auth_test.go b/pkg/blockstorage/azure/auth_test.go index e156e7c303..dcfe377911 100644 --- a/pkg/blockstorage/azure/auth_test.go +++ b/pkg/blockstorage/azure/auth_test.go @@ -15,65 +15,65 @@ package azure import ( - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/blockstorage" ) type AuthSuite struct{} -var _ = Suite(&AuthSuite{}) +var _ = check.Suite(&AuthSuite{}) -func (s *AuthSuite) SetUpSuite(c *C) { +func (s *AuthSuite) SetUpSuite(c *check.C) { } -func (s *AuthSuite) TestIsClientCredsvailable(c *C) { +func (s *AuthSuite) TestIsClientCredsvailable(c *check.C) { // success config := map[string]string{ blockstorage.AzureTenantID: "some-tenant-id", blockstorage.AzureClientID: "some-client-id", blockstorage.AzureClientSecret: "someclient-secret", } - c.Assert(isClientCredsAvailable(config), Equals, true) + c.Assert(isClientCredsAvailable(config), check.Equals, true) // remove tenantID delete(config, blockstorage.AzureTenantID) - c.Assert(isClientCredsAvailable(config), Equals, false) + c.Assert(isClientCredsAvailable(config), check.Equals, false) // remove client secret, only client ID left delete(config, blockstorage.AzureClientSecret) - c.Assert(isClientCredsAvailable(config), Equals, false) + c.Assert(isClientCredsAvailable(config), check.Equals, false) } -func (s *AuthSuite) TestIsMSICredsAvailable(c *C) { +func (s *AuthSuite) TestIsMSICredsAvailable(c *check.C) { // success config := map[string]string{ blockstorage.AzureTenantID: "some-tenant-id", blockstorage.AzureClientID: "some-client-id", blockstorage.AzureClientSecret: "someclient-secret", } - c.Assert(isMSICredsAvailable(config), Equals, false) + c.Assert(isMSICredsAvailable(config), check.Equals, false) // remove tenantID delete(config, blockstorage.AzureTenantID) - c.Assert(isMSICredsAvailable(config), Equals, false) + c.Assert(isMSICredsAvailable(config), check.Equals, false) // remove client secret, only client ID left delete(config, blockstorage.AzureClientSecret) - c.Assert(isMSICredsAvailable(config), Equals, true) + c.Assert(isMSICredsAvailable(config), check.Equals, true) // empty client ID - default msi id is implied config = map[string]string{ blockstorage.AzureClientID: "", } - c.Assert(isMSICredsAvailable(config), Equals, true) + c.Assert(isMSICredsAvailable(config), check.Equals, true) // empty creds config = map[string]string{} - c.Assert(isMSICredsAvailable(config), Equals, false) + c.Assert(isMSICredsAvailable(config), check.Equals, false) } -func (s *AuthSuite) TestNewAzureAuthenticator(c *C) { +func (s *AuthSuite) TestNewAzureAuthenticator(c *check.C) { // successful with client secret creds config := map[string]string{ blockstorage.AzureTenantID: "some-tenant-id", @@ -81,40 +81,40 @@ func (s *AuthSuite) TestNewAzureAuthenticator(c *C) { blockstorage.AzureClientSecret: "some-client-secret", } authenticator, err := 
NewAzureAuthenticator(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, ok := authenticator.(*ClientSecretAuthenticator) - c.Assert(ok, Equals, true) + c.Assert(ok, check.Equals, true) // successful with msi creds config = map[string]string{ blockstorage.AzureClientID: "some-client-id", } authenticator, err = NewAzureAuthenticator(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, ok = authenticator.(*MsiAuthenticator) - c.Assert(ok, Equals, true) + c.Assert(ok, check.Equals, true) // successful with default msi creds config = map[string]string{ blockstorage.AzureClientID: "", } authenticator, err = NewAzureAuthenticator(config) - c.Assert(err, IsNil) - c.Assert(authenticator, NotNil) + c.Assert(err, check.IsNil) + c.Assert(authenticator, check.NotNil) // unsuccessful with no creds config = map[string]string{} authenticator, err = NewAzureAuthenticator(config) - c.Assert(err, NotNil) - c.Assert(authenticator, IsNil) + c.Assert(err, check.NotNil) + c.Assert(authenticator, check.IsNil) // unsuccessful with an undefined combo of credss config = map[string]string{ blockstorage.AzureClientSecret: "some-client-secret", } authenticator, err = NewAzureAuthenticator(config) - c.Assert(err, NotNil) - c.Assert(authenticator, IsNil) + c.Assert(err, check.NotNil) + c.Assert(authenticator, check.IsNil) // unsuccessful with an undefined combo of creds config = map[string]string{ @@ -122,6 +122,6 @@ func (s *AuthSuite) TestNewAzureAuthenticator(c *C) { blockstorage.AzureClientSecret: "some-client-secret", } authenticator, err = NewAzureAuthenticator(config) - c.Assert(err, NotNil) - c.Assert(authenticator, IsNil) + c.Assert(err, check.NotNil) + c.Assert(authenticator, check.IsNil) } diff --git a/pkg/blockstorage/azure/client_test.go b/pkg/blockstorage/azure/client_test.go index 72970dc6f5..95591b54a2 100644 --- a/pkg/blockstorage/azure/client_test.go +++ b/pkg/blockstorage/azure/client_test.go @@ -21,23 +21,23 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/blockstorage" envconfig "github.com/kanisterio/kanister/pkg/config" ) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type ClientSuite struct{} -var _ = Suite(&ClientSuite{}) +var _ = check.Suite(&ClientSuite{}) -func (s *ClientSuite) SetUpSuite(c *C) { +func (s *ClientSuite) SetUpSuite(c *check.C) { } -func (s *ClientSuite) TestClient(c *C) { +func (s *ClientSuite) TestClient(c *check.C) { c.Skip("Until Azure will be fully integrated into build.sh") config := make(map[string]string) config[blockstorage.AzureSubscriptionID] = envconfig.GetEnvOrSkip(c, blockstorage.AzureSubscriptionID) @@ -47,18 +47,18 @@ func (s *ClientSuite) TestClient(c *C) { config[blockstorage.AzureResurceGroup] = envconfig.GetEnvOrSkip(c, blockstorage.AzureResurceGroup) config[blockstorage.AzureCloudEnvironmentID] = envconfig.GetEnvOrSkip(c, blockstorage.AzureCloudEnvironmentID) azCli, err := NewClient(context.Background(), config) - c.Assert(err, IsNil) - c.Assert(azCli.Cred, NotNil) - c.Assert(azCli.SubscriptionID, NotNil) - c.Assert(azCli.DisksClient, NotNil) - c.Assert(azCli.SnapshotsClient, NotNil) - c.Assert(azCli.DisksClient.NewListPager(nil), NotNil) - c.Assert(azCli.SKUsClient, NotNil) - c.Assert(azCli.SubscriptionsClient, NotNil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) + c.Assert(azCli.Cred, check.NotNil) + c.Assert(azCli.SubscriptionID, check.NotNil) + c.Assert(azCli.DisksClient, check.NotNil) + c.Assert(azCli.SnapshotsClient, check.NotNil) + c.Assert(azCli.DisksClient.NewListPager(nil), check.NotNil) + c.Assert(azCli.SKUsClient, check.NotNil) + c.Assert(azCli.SubscriptionsClient, check.NotNil) + c.Assert(err, check.IsNil) } -func (s ClientSuite) TestGetRegions(c *C) { +func (s ClientSuite) TestGetRegions(c *check.C) { ctx := context.Background() config := map[string]string{} config[blockstorage.AzureSubscriptionID] = envconfig.GetEnvOrSkip(c, blockstorage.AzureSubscriptionID) @@ -69,29 +69,29 @@ func (s ClientSuite) TestGetRegions(c *C) { // config[blockstorage.AzureCloudEnviornmentID] = envconfig.GetEnvOrSkip(c, blockstorage.AzureCloudEnviornmentID) bsp, err := NewProvider(ctx, config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ads := bsp.(*AdStorage) // get zones with other region zones, err := ads.FromRegion(ctx, "eastus") fmt.Println(zones) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, zone := range zones { - c.Assert(strings.Contains(zone, "eastus"), Equals, true) - c.Assert(strings.Contains(zone, "westus"), Equals, false) + c.Assert(strings.Contains(zone, "eastus"), check.Equals, true) + c.Assert(strings.Contains(zone, "westus"), check.Equals, false) } regions, err := ads.GetRegions(ctx) - c.Assert(err, IsNil) - c.Assert(regions, NotNil) + c.Assert(err, check.IsNil) + c.Assert(regions, check.NotNil) } -func (s *ClientSuite) TestGetCredConfig(c *C) { +func (s *ClientSuite) TestGetCredConfig(c *check.C) { for _, tc := range []struct { name string env Environment config map[string]string - errChecker Checker + errChecker check.Checker expCCC ClientCredentialsConfig }{ { @@ -111,7 +111,7 @@ func (s *ClientSuite) TestGetCredConfig(c *C) { Resource: "aadrid", AADEndpoint: "aade", }, - errChecker: IsNil, + errChecker: check.IsNil, }, { name: "Test with client credential in configuration", @@ -128,7 +128,7 @@ func (s *ClientSuite) TestGetCredConfig(c *C) { Resource: cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint, AADEndpoint: cloud.AzurePublic.ActiveDirectoryAuthorityHost, }, - errChecker: IsNil, + errChecker: check.IsNil, }, { name: "Test without AD in configuration", @@ -147,7 +147,7 @@ 
func (s *ClientSuite) TestGetCredConfig(c *C) { Resource: cloud.AzureGovernment.Services[cloud.ResourceManager].Endpoint, AADEndpoint: cloud.AzureGovernment.ActiveDirectoryAuthorityHost, }, - errChecker: IsNil, + errChecker: check.IsNil, }, { name: "Test with tenantid and clientid in configuration", @@ -156,7 +156,7 @@ func (s *ClientSuite) TestGetCredConfig(c *C) { blockstorage.AzureTenantID: "atid", blockstorage.AzureClientID: "acid", }, - errChecker: NotNil, + errChecker: check.NotNil, }, { name: "Test with tenantid in configuration", @@ -164,23 +164,23 @@ func (s *ClientSuite) TestGetCredConfig(c *C) { config: map[string]string{ blockstorage.AzureTenantID: "atid", }, - errChecker: NotNil, + errChecker: check.NotNil, }, { name: "Test with nil configuration", env: USGovernmentCloud, config: map[string]string{}, - errChecker: NotNil, + errChecker: check.NotNil, }, } { ccc, err := getCredConfig(tc.env, tc.config) c.Assert(err, tc.errChecker) if err == nil { - c.Assert(ccc.ClientID, Equals, tc.expCCC.ClientID) - c.Assert(ccc.ClientSecret, Equals, tc.expCCC.ClientSecret) - c.Assert(ccc.TenantID, Equals, tc.expCCC.TenantID) - c.Assert(ccc.Resource, Equals, tc.expCCC.Resource) - c.Assert(ccc.AADEndpoint, Equals, tc.expCCC.AADEndpoint) + c.Assert(ccc.ClientID, check.Equals, tc.expCCC.ClientID) + c.Assert(ccc.ClientSecret, check.Equals, tc.expCCC.ClientSecret) + c.Assert(ccc.TenantID, check.Equals, tc.expCCC.TenantID) + c.Assert(ccc.Resource, check.Equals, tc.expCCC.Resource) + c.Assert(ccc.AADEndpoint, check.Equals, tc.expCCC.AADEndpoint) } } } diff --git a/pkg/blockstorage/blockstorage_test.go b/pkg/blockstorage/blockstorage_test.go index a959bb3c3b..10bd57e1cf 100644 --- a/pkg/blockstorage/blockstorage_test.go +++ b/pkg/blockstorage/blockstorage_test.go @@ -20,7 +20,7 @@ import ( "strings" "testing" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" awsconfig "github.com/kanisterio/kanister/pkg/aws" "github.com/kanisterio/kanister/pkg/blockstorage" @@ -38,7 +38,7 @@ const ( testTagValue = "unittest" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type BlockStorageProviderSuite struct { storageType blockstorage.Type @@ -50,39 +50,39 @@ type BlockStorageProviderSuite struct { args map[string]string } -var _ = Suite(&BlockStorageProviderSuite{storageType: blockstorage.TypeEBS, storageRegion: clusterRegionAWS, storageAZ: "us-west-2b"}) -var _ = Suite(&BlockStorageProviderSuite{storageType: blockstorage.TypeGPD, storageRegion: "", storageAZ: "us-west1-b"}) -var _ = Suite(&BlockStorageProviderSuite{storageType: blockstorage.TypeGPD, storageRegion: "", storageAZ: "us-west1-c__us-west1-a"}) -var _ = Suite(&BlockStorageProviderSuite{storageType: blockstorage.TypeAD, storageRegion: "", storageAZ: "eastus2-1"}) +var _ = check.Suite(&BlockStorageProviderSuite{storageType: blockstorage.TypeEBS, storageRegion: clusterRegionAWS, storageAZ: "us-west-2b"}) +var _ = check.Suite(&BlockStorageProviderSuite{storageType: blockstorage.TypeGPD, storageRegion: "", storageAZ: "us-west1-b"}) +var _ = check.Suite(&BlockStorageProviderSuite{storageType: blockstorage.TypeGPD, storageRegion: "", storageAZ: "us-west1-c__us-west1-a"}) +var _ = check.Suite(&BlockStorageProviderSuite{storageType: blockstorage.TypeAD, storageRegion: "", storageAZ: "eastus2-1"}) -func (s *BlockStorageProviderSuite) SetUpSuite(c *C) { +func (s *BlockStorageProviderSuite) SetUpSuite(c *check.C) { var err error s.args = make(map[string]string) config := s.getConfig(c, s.storageRegion) s.provider, err = getter.New().Get(s.storageType, config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *BlockStorageProviderSuite) TearDownTest(c *C) { +func (s *BlockStorageProviderSuite) TearDownTest(c *check.C) { for _, snapshot := range s.snapshots { - c.Assert(s.provider.SnapshotDelete(context.Background(), snapshot), IsNil) + c.Assert(s.provider.SnapshotDelete(context.Background(), snapshot), check.IsNil) } s.snapshots = nil for _, volume := range s.volumes { - c.Assert(s.provider.VolumeDelete(context.Background(), volume), IsNil) + c.Assert(s.provider.VolumeDelete(context.Background(), volume), check.IsNil) } s.volumes = nil } -func (s *BlockStorageProviderSuite) TestCreateVolume(c *C) { +func (s *BlockStorageProviderSuite) TestCreateVolume(c *check.C) { vol := s.createVolume(c) // Check setting tags on the volume tags := map[string]string{"testtag": "testtagvalue"} err := s.provider.SetTags(context.Background(), vol, tags) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) volUpdated, err := s.provider.VolumeGet(context.Background(), vol.ID, vol.Az) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check previously set tags still exist s.checkTagsExist(c, blockstorage.KeyValueToMap(volUpdated.Tags), blockstorage.KeyValueToMap(vol.Tags)) // Check new tags were set @@ -94,21 +94,21 @@ func (s *BlockStorageProviderSuite) TestCreateVolume(c *C) { s.testVolumesList(c) err = s.provider.VolumeDelete(context.Background(), volUpdated) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // We ensure that multiple deletions are handled. 
err = s.provider.VolumeDelete(context.Background(), volUpdated) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.volumes = nil } -func (s *BlockStorageProviderSuite) TestCreateSnapshot(c *C) { +func (s *BlockStorageProviderSuite) TestCreateSnapshot(c *check.C) { snapshot := s.createSnapshot(c) // Check setting tags on the snapshot tags := map[string]string{"testtag": "testtagvalue"} err := s.provider.SetTags(context.Background(), snapshot, tags) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) snap, err := s.provider.SnapshotGet(context.Background(), snapshot.ID) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check previously set tags still exist s.checkTagsExist(c, blockstorage.KeyValueToMap(snap.Tags), blockstorage.KeyValueToMap(snapshot.Tags)) // Check new tags were set @@ -117,14 +117,14 @@ func (s *BlockStorageProviderSuite) TestCreateSnapshot(c *C) { s.checkStdTagsExist(c, blockstorage.KeyValueToMap(snap.Tags)) snapshotGet, err := s.provider.SnapshotGet(context.Background(), snapshot.ID) - c.Assert(err, IsNil) - c.Assert(snapshotGet.ID, Equals, snapshot.ID) + c.Assert(err, check.IsNil) + c.Assert(snapshotGet.ID, check.Equals, snapshot.ID) if s.provider.Type() != blockstorage.TypeAD { // Also test creating a volume from this snapshot tags = map[string]string{testTagKey: testTagValue, "kanister.io/testname": c.TestName()} vol, err := s.provider.VolumeCreateFromSnapshot(context.Background(), *snapshot, tags) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.volumes = append(s.volumes, vol) for _, tag := range snapshot.Volume.Tags { if _, found := tags[tag.Key]; !found { @@ -136,18 +136,18 @@ func (s *BlockStorageProviderSuite) TestCreateSnapshot(c *C) { s.checkStdTagsExist(c, blockstorage.KeyValueToMap(vol.Tags)) err = s.provider.SnapshotDelete(context.Background(), snapshot) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // We ensure that multiple deletions are handled. 
err = s.provider.SnapshotDelete(context.Background(), snapshot) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.snapshots = nil _, err = s.provider.SnapshotGet(context.Background(), snapshot.ID) - c.Assert(err, NotNil) - c.Assert(strings.Contains(err.Error(), blockstorage.SnapshotDoesNotExistError), Equals, true) + c.Assert(err, check.NotNil) + c.Assert(strings.Contains(err.Error(), blockstorage.SnapshotDoesNotExistError), check.Equals, true) } } -func (s *BlockStorageProviderSuite) TestSnapshotCopy(c *C) { +func (s *BlockStorageProviderSuite) TestSnapshotCopy(c *check.C) { if s.storageType == blockstorage.TypeGPD { c.Skip("Skip snapshot copy test for GPD provider since the SnapshotCopy is yet to be implemented for GPD ") } @@ -173,59 +173,59 @@ func (s *BlockStorageProviderSuite) TestSnapshotCopy(c *C) { Volume: nil, } snap, err = s.provider.SnapshotCopyWithArgs(context.TODO(), *srcSnapshot, *dstSnapshot, s.args) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } if s.storageType != blockstorage.TypeAD { snap, err = s.provider.SnapshotCopy(context.TODO(), *srcSnapshot, *dstSnapshot) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } log.Print("Snapshot copied", field.M{"FromSnapshotID": srcSnapshot.ID, "ToSnapshotID": snap.ID}) config := s.getConfig(c, dstSnapshot.Region) provider, err := getter.New().Get(s.storageType, config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) snapDetails, err := provider.SnapshotGet(context.TODO(), snap.ID) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Check(snapDetails.Region, Equals, dstSnapshot.Region) - c.Check(snapDetails.SizeInBytes, Equals, srcSnapshot.SizeInBytes) + c.Check(snapDetails.Region, check.Equals, dstSnapshot.Region) + c.Check(snapDetails.SizeInBytes, check.Equals, srcSnapshot.SizeInBytes) err = provider.SnapshotDelete(context.TODO(), snap) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = provider.SnapshotDelete(context.TODO(), srcSnapshot) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *BlockStorageProviderSuite) testVolumesList(c *C) { +func (s *BlockStorageProviderSuite) testVolumesList(c *check.C) { var zone string tags := map[string]string{"testtag": "testtagvalue"} zone = s.storageAZ vols, err := s.provider.VolumesList(context.Background(), tags, zone) - c.Assert(err, IsNil) - c.Assert(vols, NotNil) - c.Assert(vols, FitsTypeOf, []*blockstorage.Volume{}) - c.Assert(vols, Not(HasLen), 0) - c.Assert(vols[0].Type, Equals, s.provider.Type()) + c.Assert(err, check.IsNil) + c.Assert(vols, check.NotNil) + c.Assert(vols, check.FitsTypeOf, []*blockstorage.Volume{}) + c.Assert(vols, check.Not(check.HasLen), 0) + c.Assert(vols[0].Type, check.Equals, s.provider.Type()) } -func (s *BlockStorageProviderSuite) TestSnapshotsList(c *C) { +func (s *BlockStorageProviderSuite) TestSnapshotsList(c *check.C) { var tags map[string]string testSnaphot := s.createSnapshot(c) tags = map[string]string{testTagKey: testTagValue} snaps, err := s.provider.SnapshotsList(context.Background(), tags) - c.Assert(err, IsNil) - c.Assert(snaps, NotNil) - c.Assert(snaps, FitsTypeOf, []*blockstorage.Snapshot{}) - c.Assert(snaps, Not(HasLen), 0) - c.Assert(snaps[0].Type, Equals, s.provider.Type()) + c.Assert(err, check.IsNil) + c.Assert(snaps, check.NotNil) + c.Assert(snaps, check.FitsTypeOf, []*blockstorage.Snapshot{}) + c.Assert(snaps, check.Not(check.HasLen), 0) + c.Assert(snaps[0].Type, check.Equals, s.provider.Type()) _ = s.provider.SnapshotDelete(context.Background(), testSnaphot) } // Helpers -func (s 
*BlockStorageProviderSuite) createVolume(c *C) *blockstorage.Volume { +func (s *BlockStorageProviderSuite) createVolume(c *check.C) *blockstorage.Volume { tags := []*blockstorage.KeyValue{ {Key: testTagKey, Value: testTagValue}, {Key: "kanister.io/testname", Value: c.TestName()}, @@ -243,44 +243,44 @@ func (s *BlockStorageProviderSuite) createVolume(c *C) *blockstorage.Volume { } ret, err := s.provider.VolumeCreate(context.Background(), vol) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.volumes = append(s.volumes, ret) - c.Assert(ret.SizeInBytes, Equals, int64(size)) + c.Assert(ret.SizeInBytes, check.Equals, int64(size)) s.checkTagsExist(c, blockstorage.KeyValueToMap(ret.Tags), blockstorage.KeyValueToMap(tags)) s.checkStdTagsExist(c, blockstorage.KeyValueToMap(ret.Tags)) return ret } -func (s *BlockStorageProviderSuite) createSnapshot(c *C) *blockstorage.Snapshot { +func (s *BlockStorageProviderSuite) createSnapshot(c *check.C) *blockstorage.Snapshot { vol := s.createVolume(c) tags := map[string]string{testTagKey: testTagValue, "kanister.io/testname": c.TestName()} ret, err := s.provider.SnapshotCreate(context.Background(), *vol, tags) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.snapshots = append(s.snapshots, ret) s.checkTagsExist(c, blockstorage.KeyValueToMap(ret.Tags), tags) - c.Assert(s.provider.SnapshotCreateWaitForCompletion(context.Background(), ret), IsNil) - c.Assert(ret.Volume, NotNil) + c.Assert(s.provider.SnapshotCreateWaitForCompletion(context.Background(), ret), check.IsNil) + c.Assert(ret.Volume, check.NotNil) return ret } -func (s *BlockStorageProviderSuite) checkTagsExist(c *C, actual map[string]string, expected map[string]string) { +func (s *BlockStorageProviderSuite) checkTagsExist(c *check.C, actual map[string]string, expected map[string]string) { if s.provider.Type() != blockstorage.TypeEBS { expected = blockstorage.SanitizeTags(expected) } for k, v := range expected { - c.Check(actual[k], Equals, v) + c.Check(actual[k], check.Equals, v) } } -func (s *BlockStorageProviderSuite) checkStdTagsExist(c *C, actual map[string]string) { +func (s *BlockStorageProviderSuite) checkStdTagsExist(c *check.C, actual map[string]string) { stdTags := ktags.GetStdTags() for k := range stdTags { - c.Check(actual[k], NotNil) + c.Check(actual[k], check.NotNil) } } -func (s *BlockStorageProviderSuite) getConfig(c *C, region string) map[string]string { +func (s *BlockStorageProviderSuite) getConfig(c *check.C, region string) map[string]string { config := make(map[string]string) switch s.storageType { case blockstorage.TypeEBS: @@ -313,7 +313,7 @@ func (b *BlockStorageProviderSuite) isRegional(az string) bool { return strings.Contains(az, volume.RegionZoneSeparator) } -func (b *BlockStorageProviderSuite) TestFilterSnasphotWithTags(c *C) { +func (b *BlockStorageProviderSuite) TestFilterSnasphotWithTags(c *check.C) { snapshot1 := &blockstorage.Snapshot{ID: "snap1", Tags: blockstorage.SnapshotTags{ {Key: "key1", Value: "val1"}, {Key: "key3", Value: ""}, @@ -324,14 +324,14 @@ func (b *BlockStorageProviderSuite) TestFilterSnasphotWithTags(c *C) { filterTags := map[string]string{"key1": "val1"} snaps := blockstorage.FilterSnapshotsWithTags([]*blockstorage.Snapshot{snapshot1, snapshot2}, filterTags) - c.Assert(len(snaps), Equals, 1) + c.Assert(len(snaps), check.Equals, 1) snaps = blockstorage.FilterSnapshotsWithTags([]*blockstorage.Snapshot{snapshot1, snapshot2}, nil) - c.Assert(len(snaps), Equals, 2) + c.Assert(len(snaps), check.Equals, 2) snaps = 
blockstorage.FilterSnapshotsWithTags([]*blockstorage.Snapshot{snapshot1, snapshot2}, map[string]string{}) - c.Assert(len(snaps), Equals, 2) + c.Assert(len(snaps), check.Equals, 2) snaps = blockstorage.FilterSnapshotsWithTags([]*blockstorage.Snapshot{snapshot1, snapshot2}, map[string]string{"bad": "tag"}) - c.Assert(len(snaps), Equals, 0) + c.Assert(len(snaps), check.Equals, 0) } diff --git a/pkg/blockstorage/gcepd/client_test.go b/pkg/blockstorage/gcepd/client_test.go index effee31d6a..3a26ca6fe8 100644 --- a/pkg/blockstorage/gcepd/client_test.go +++ b/pkg/blockstorage/gcepd/client_test.go @@ -21,36 +21,36 @@ import ( "strings" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/blockstorage" ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type ClientSuite struct{} -var _ = Suite(&ClientSuite{}) +var _ = check.Suite(&ClientSuite{}) -func (s *ClientSuite) SetUpSuite(c *C) {} +func (s *ClientSuite) SetUpSuite(c *check.C) {} -func (s *ClientSuite) TestClient(c *C) { +func (s *ClientSuite) TestClient(c *check.C) { var zone string filename := s.GetEnvOrSkip(c, blockstorage.GoogleCloudCreds) b, err := os.ReadFile(filename) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) gCli, err := NewClient(context.Background(), string(b)) - c.Assert(err, IsNil) - c.Assert(gCli.Service, NotNil) - c.Assert(*gCli, FitsTypeOf, Client{}) + c.Assert(err, check.IsNil) + c.Assert(gCli.Service, check.NotNil) + c.Assert(*gCli, check.FitsTypeOf, Client{}) // Get zone zone = s.GetEnvOrSkip(c, blockstorage.GoogleCloudZone) _, err = gCli.Service.Disks.List(gCli.ProjectID, zone).Do() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *ClientSuite) GetEnvOrSkip(c *C, varName string) string { +func (s *ClientSuite) GetEnvOrSkip(c *check.C, varName string) string { v := os.Getenv(varName) // Ensure the variable is set if v == "" { @@ -59,7 +59,7 @@ func (s *ClientSuite) GetEnvOrSkip(c *C, varName string) string { return v } -func (s ClientSuite) TestGetRegions(c *C) { +func (s ClientSuite) TestGetRegions(c *check.C) { ctx := context.Background() config := map[string]string{} creds := s.GetEnvOrSkip(c, blockstorage.GoogleCloudCreds) @@ -67,19 +67,19 @@ func (s ClientSuite) TestGetRegions(c *C) { // create provider with region config[blockstorage.GoogleCloudCreds] = creds bsp, err := NewProvider(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) gpds := bsp.(*GpdStorage) // get zones with other region zones, err := gpds.FromRegion(ctx, "us-east1") fmt.Println(zones) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, zone := range zones { - c.Assert(strings.Contains(zone, "us-east1"), Equals, true) - c.Assert(strings.Contains(zone, "us-west1"), Equals, false) + c.Assert(strings.Contains(zone, "us-east1"), check.Equals, true) + c.Assert(strings.Contains(zone, "us-west1"), check.Equals, false) } regions, err := gpds.GetRegions(ctx) - c.Assert(err, IsNil) - c.Assert(regions, NotNil) + c.Assert(err, check.IsNil) + c.Assert(regions, check.NotNil) } diff --git a/pkg/blockstorage/gcepd/zone_test.go b/pkg/blockstorage/gcepd/zone_test.go index 3d64d3810f..3c8a5f0fc9 100644 --- a/pkg/blockstorage/gcepd/zone_test.go +++ b/pkg/blockstorage/gcepd/zone_test.go @@ -15,16 +15,16 @@ package gcepd import ( - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/blockstorage/zone" ) type ZoneSuite struct{} -var _ = Suite(&ZoneSuite{}) +var _ = check.Suite(&ZoneSuite{}) -func (s ZoneSuite) TestZoneWithUnknownNodeZones(c *C) { +func (s ZoneSuite) TestZoneWithUnknownNodeZones(c *check.C) { defaultZones := []string{"us-west2-a", "us-west2-b", "us-west2-c"} for _, tc := range []struct { zones []string @@ -48,6 +48,6 @@ func (s ZoneSuite) TestZoneWithUnknownNodeZones(c *C) { }, } { z := zone.SanitizeAvailableZones(tc.in, tc.zones) - c.Assert(z, DeepEquals, tc.out) + c.Assert(z, check.DeepEquals, tc.out) } } diff --git a/pkg/blockstorage/helper_test.go b/pkg/blockstorage/helper_test.go index 13e0a1dfd8..64021658b8 100644 --- a/pkg/blockstorage/helper_test.go +++ b/pkg/blockstorage/helper_test.go @@ -15,46 +15,46 @@ package blockstorage import ( - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) type HelperSuite struct{} -var _ = Suite(&HelperSuite{}) +var _ = check.Suite(&HelperSuite{}) -func (s *HelperSuite) SetUpSuite(c *C) { +func (s *HelperSuite) SetUpSuite(c *check.C) { } -func (h *HelperSuite) TestStringSlice(c *C) { +func (h *HelperSuite) TestStringSlice(c *check.C) { source := []string{"test1", "test2"} target := StringSlice(&source) - c.Assert(target[0], Equals, source[0]) - c.Assert(target[1], Equals, source[1]) + c.Assert(target[0], check.Equals, source[0]) + c.Assert(target[1], check.Equals, source[1]) } -func (s *HelperSuite) TestSliceStringPtr(c *C) { +func (s *HelperSuite) TestSliceStringPtr(c *check.C) { source := []string{"test1", "test2"} res := SliceStringPtr(source) for i, elePtr := range res { var target = *elePtr - c.Assert(target, Equals, source[i]) + c.Assert(target, check.Equals, source[i]) } } -func (s *HelperSuite) TestIntFromPtr(c *C) { +func (s *HelperSuite) TestIntFromPtr(c *check.C) { source := 1 target := Int(&source) - c.Assert(target, Equals, source) + c.Assert(target, check.Equals, source) } -func (s *HelperSuite) TestIntToPtr(c *C) { +func (s *HelperSuite) TestIntToPtr(c *check.C) { source := 1 target := IntPtr(source) - c.Assert(*target, Equals, source) + c.Assert(*target, check.Equals, source) } -func (s *HelperSuite) TestStringToPtr(c *C) { +func (s *HelperSuite) TestStringToPtr(c *check.C) { source := "test" target := StringPtr(source) - c.Assert(*target, Equals, source) + c.Assert(*target, check.Equals, source) } diff --git a/pkg/blockstorage/tags/tags_test.go b/pkg/blockstorage/tags/tags_test.go index 694a7fe2b7..16f8b126cf 100644 --- a/pkg/blockstorage/tags/tags_test.go +++ b/pkg/blockstorage/tags/tags_test.go @@ -3,16 +3,16 @@ package tags import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type TagsSuite struct{} -var _ = Suite(&TagsSuite{}) +var _ = check.Suite(&TagsSuite{}) -func (s *TagsSuite) TestSanitizeValueForGCP(c *C) { +func (s *TagsSuite) TestSanitizeValueForGCP(c *check.C) { for _, tc := range []struct { input string output string @@ -51,6 +51,6 @@ func (s *TagsSuite) TestSanitizeValueForGCP(c *C) { }, } { out := SanitizeValueForGCP(tc.input) - c.Assert(out, Equals, tc.output) + c.Assert(out, check.Equals, tc.output) } } diff --git a/pkg/blockstorage/zone/levenshtein_test.go b/pkg/blockstorage/zone/levenshtein_test.go index 66a12bb837..1a491722ee 100644 --- a/pkg/blockstorage/zone/levenshtein_test.go +++ b/pkg/blockstorage/zone/levenshtein_test.go @@ -15,14 +15,14 @@ package zone import ( - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" ) type LevenshteinSuite struct{} -var _ = Suite(&LevenshteinSuite{}) +var _ = check.Suite(&LevenshteinSuite{}) -func (s LevenshteinSuite) TestLevenshteinMatch(c *C) { +func (s LevenshteinSuite) TestLevenshteinMatch(c *check.C) { for _, tc := range []struct { input string options []string @@ -64,6 +64,6 @@ func (s LevenshteinSuite) TestLevenshteinMatch(c *C) { }, } { out := levenshteinMatch(tc.input, tc.options) - c.Assert(out, Equals, tc.out) + c.Assert(out, check.Equals, tc.out) } } diff --git a/pkg/blockstorage/zone/zone_kube_test.go b/pkg/blockstorage/zone/zone_kube_test.go index 8447ccd1b2..b7aa43e9a5 100644 --- a/pkg/blockstorage/zone/zone_kube_test.go +++ b/pkg/blockstorage/zone/zone_kube_test.go @@ -17,21 +17,21 @@ package zone import ( "context" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/kube" ) type KubeTestZoneSuite struct{} -var _ = Suite(&KubeTestZoneSuite{}) +var _ = check.Suite(&KubeTestZoneSuite{}) -func (s KubeTestZoneSuite) TestNodeZones(c *C) { +func (s KubeTestZoneSuite) TestNodeZones(c *check.C) { c.Skip("Fails in Minikube") ctx := context.Background() cli, err := kube.NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) zones, _, err := NodeZonesAndRegion(ctx, cli) - c.Assert(err, IsNil) - c.Assert(zones, Not(HasLen), 0) + c.Assert(err, check.IsNil) + c.Assert(zones, check.Not(check.HasLen), 0) } diff --git a/pkg/blockstorage/zone/zone_test.go b/pkg/blockstorage/zone/zone_test.go index c6bb205dd6..13858cbb78 100644 --- a/pkg/blockstorage/zone/zone_test.go +++ b/pkg/blockstorage/zone/zone_test.go @@ -21,7 +21,7 @@ import ( "sort" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -31,13 +31,13 @@ import ( ) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type ZoneSuite struct{} -var _ = Suite(&ZoneSuite{}) +var _ = check.Suite(&ZoneSuite{}) -func (s ZoneSuite) TestNodeZoneAndRegionGCP(c *C) { +func (s ZoneSuite) TestNodeZoneAndRegionGCP(c *check.C) { ctx := context.Background() node1 := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -119,16 +119,16 @@ func (s ZoneSuite) TestNodeZoneAndRegionGCP(c *C) { expectedZone["us-west2-c"] = struct{}{} cli := fake.NewSimpleClientset(node1, node2, node3) z, r, err := NodeZonesAndRegion(ctx, cli) - c.Assert(err, IsNil) - c.Assert(reflect.DeepEqual(z, expectedZone), Equals, true) - c.Assert(r, Equals, "us-west2") + c.Assert(err, check.IsNil) + c.Assert(reflect.DeepEqual(z, expectedZone), check.Equals, true) + c.Assert(r, check.Equals, "us-west2") cli = fake.NewSimpleClientset(node4, node5) _, _, err = NodeZonesAndRegion(ctx, cli) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (s ZoneSuite) TestNodeZoneAndRegionEBS(c *C) { +func (s ZoneSuite) TestNodeZoneAndRegionEBS(c *check.C) { ctx := context.Background() node1 := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -210,16 +210,16 @@ func (s ZoneSuite) TestNodeZoneAndRegionEBS(c *C) { expectedZone["us-west-2c"] = struct{}{} cli := fake.NewSimpleClientset(node1, node2, node3) z, r, err := NodeZonesAndRegion(ctx, cli) - c.Assert(err, IsNil) - c.Assert(reflect.DeepEqual(z, expectedZone), Equals, true) - c.Assert(r, Equals, "us-west-2") + c.Assert(err, check.IsNil) + c.Assert(reflect.DeepEqual(z, expectedZone), check.Equals, true) + c.Assert(r, check.Equals, "us-west-2") cli = fake.NewSimpleClientset(node4, node5) _, _, err = NodeZonesAndRegion(ctx, cli) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (s ZoneSuite) TestNodeZoneAndRegionAD(c *C) { +func (s ZoneSuite) TestNodeZoneAndRegionAD(c *check.C) { ctx := context.Background() node1 := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -317,24 +317,24 @@ func (s ZoneSuite) TestNodeZoneAndRegionAD(c *C) { expectedZone["westus2-3"] = struct{}{} cli := fake.NewSimpleClientset(node1, node2, node3) z, r, err := NodeZonesAndRegion(ctx, cli) - c.Assert(err, IsNil) - c.Assert(reflect.DeepEqual(z, expectedZone), Equals, true) - c.Assert(r, Equals, "westus2") + c.Assert(err, check.IsNil) + c.Assert(reflect.DeepEqual(z, expectedZone), check.Equals, true) + c.Assert(r, check.Equals, "westus2") // non-zonal cluster test cli = fake.NewSimpleClientset(node4) z, r, err = NodeZonesAndRegion(ctx, cli) - c.Assert(err, IsNil) - c.Assert(len(z) == 0, Equals, true) - c.Assert(r, Equals, "westus") + c.Assert(err, check.IsNil) + c.Assert(len(z) == 0, check.Equals, true) + c.Assert(r, check.Equals, "westus") // error case cli = fake.NewSimpleClientset(node5, node6) _, _, err = NodeZonesAndRegion(ctx, cli) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (s ZoneSuite) TestSanitizeZones(c *C) { +func (s ZoneSuite) TestSanitizeZones(c *check.C) { for _, tc := range []struct { availableZones map[string]struct{} validZoneNames []string @@ -422,11 +422,11 @@ func (s ZoneSuite) TestSanitizeZones(c *C) { }, } { out := SanitizeAvailableZones(tc.availableZones, tc.validZoneNames) - c.Assert(out, DeepEquals, tc.out) + c.Assert(out, check.DeepEquals, tc.out) } } -func (s ZoneSuite) TestFromSourceRegionZone(c *C) { +func (s ZoneSuite) TestFromSourceRegionZone(c *check.C) { ctx := context.Background() var t = &ebsTest{} node1 := &corev1.Node{ @@ -639,11 +639,11 @@ func (s ZoneSuite) TestFromSourceRegionZone(c *C) { 
out, err := FromSourceRegionZone(ctx, t, tc.inCli, tc.inRegion, tc.inZones...) sort.Strings(out) sort.Strings(tc.outZones) - c.Assert(out, DeepEquals, tc.outZones) + c.Assert(out, check.DeepEquals, tc.outZones) if err != nil { - c.Assert(err, ErrorMatches, tc.outErr.Error()) + c.Assert(err, check.ErrorMatches, tc.outErr.Error()) } else { - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } } @@ -672,7 +672,7 @@ func (et *ebsTest) FromRegion(ctx context.Context, region string) ([]string, err } } -func (s ZoneSuite) TestGetReadySchedulableNodes(c *C) { +func (s ZoneSuite) TestGetReadySchedulableNodes(c *check.C) { node1 := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -720,22 +720,22 @@ func (s ZoneSuite) TestGetReadySchedulableNodes(c *C) { } cli := fake.NewSimpleClientset(node1, node2, node3) nl, err := GetReadySchedulableNodes(cli) - c.Assert(err, IsNil) - c.Assert(len(nl), Equals, 1) + c.Assert(err, check.IsNil) + c.Assert(len(nl), check.Equals, 1) node1.Spec = corev1.NodeSpec{ Unschedulable: true, } cli = fake.NewSimpleClientset(node1, node2, node3) nl, err = GetReadySchedulableNodes(cli) - c.Assert(err, NotNil) - c.Assert(nl, IsNil) + c.Assert(err, check.NotNil) + c.Assert(nl, check.IsNil) } -func (s ZoneSuite) TestConsistentZones(c *C) { +func (s ZoneSuite) TestConsistentZones(c *check.C) { // no available zones z := consistentZone("source", map[string]struct{}{}) - c.Assert(z, Equals, "") + c.Assert(z, check.Equals, "") az1 := map[string]struct{}{ "a": {}, @@ -752,12 +752,12 @@ func (s ZoneSuite) TestConsistentZones(c *C) { z1 := consistentZone("x", az1) z2 := consistentZone("x", az2) - c.Assert(z1, Equals, z2) + c.Assert(z1, check.Equals, z2) // different lists result in different zones az2["d"] = struct{}{} z1 = consistentZone("x", az1) z2 = consistentZone("x", az2) - c.Assert(z1, Not(Equals), z2) + c.Assert(z1, check.Not(check.Equals), z2) } diff --git a/pkg/blueprint/validate/validate_test.go b/pkg/blueprint/validate/validate_test.go index 8b8d782f74..fbd6636aca 100644 --- a/pkg/blueprint/validate/validate_test.go +++ b/pkg/blueprint/validate/validate_test.go @@ -19,7 +19,7 @@ import ( "strings" "testing" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" kanister "github.com/kanisterio/kanister/pkg" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -28,11 +28,11 @@ import ( "github.com/kanisterio/kanister/pkg/utils" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type BlueprintTest struct { backupPhases []crv1alpha1.BlueprintPhase - err Checker + err check.Checker errContains string deferPhase *crv1alpha1.BlueprintPhase restorePhases []crv1alpha1.BlueprintPhase @@ -44,9 +44,9 @@ const ( type ValidateBlueprint struct{} -var _ = Suite(&ValidateBlueprint{}) +var _ = check.Suite(&ValidateBlueprint{}) -func (v *ValidateBlueprint) TestValidate(c *C) { +func (v *ValidateBlueprint) TestValidate(c *check.C) { for _, tc := range []BlueprintTest{ { backupPhases: []crv1alpha1.BlueprintPhase{ @@ -76,7 +76,7 @@ func (v *ValidateBlueprint) TestValidate(c *C) { }, }, errContains: "Required arg missing: command", - err: NotNil, + err: check.NotNil, }, { backupPhases: []crv1alpha1.BlueprintPhase{ @@ -98,7 +98,7 @@ func (v *ValidateBlueprint) TestValidate(c *C) { }, }, }, - err: IsNil, + err: check.IsNil, }, { // function name is incorrect @@ -122,7 +122,7 @@ func (v *ValidateBlueprint) TestValidate(c *C) { }, }, errContains: "Requested function {KubeTasks} has not been registered", - err: NotNil, + err: check.NotNil, }, { backupPhases: []crv1alpha1.BlueprintPhase{ @@ -136,7 +136,7 @@ func (v *ValidateBlueprint) TestValidate(c *C) { }, }, }, - err: IsNil, + err: check.IsNil, }, { backupPhases: []crv1alpha1.BlueprintPhase{ @@ -150,7 +150,7 @@ func (v *ValidateBlueprint) TestValidate(c *C) { }, }, errContains: "Required arg missing: command", - err: NotNil, + err: check.NotNil, }, { backupPhases: []crv1alpha1.BlueprintPhase{ @@ -164,7 +164,7 @@ func (v *ValidateBlueprint) TestValidate(c *C) { }, }, errContains: "Required arg missing: command", - err: NotNil, + err: check.NotNil, deferPhase: &crv1alpha1.BlueprintPhase{ Func: "PrepareData", Name: "51", @@ -187,7 +187,7 @@ func (v *ValidateBlueprint) TestValidate(c *C) { }, }, errContains: "Required arg missing: command", - err: NotNil, + err: check.NotNil, deferPhase: &crv1alpha1.BlueprintPhase{ Func: "PrepareData", Name: "61", @@ -209,7 +209,7 @@ func (v *ValidateBlueprint) TestValidate(c *C) { }, }, }, - err: IsNil, + err: check.IsNil, deferPhase: &crv1alpha1.BlueprintPhase{ Func: "PrepareData", Name: "71", @@ -228,13 +228,13 @@ func (v *ValidateBlueprint) TestValidate(c *C) { } err := Do(bp, kanister.DefaultVersion) if err != nil { - c.Assert(strings.Contains(err.Error(), tc.errContains), Equals, true) + c.Assert(strings.Contains(err.Error(), tc.errContains), check.Equals, true) } c.Assert(err, tc.err) } } -func (v *ValidateBlueprint) TestValidateNonDefaultVersion(c *C) { +func (v *ValidateBlueprint) TestValidateNonDefaultVersion(c *check.C) { for _, tc := range []BlueprintTest{ { backupPhases: []crv1alpha1.BlueprintPhase{ @@ -257,7 +257,7 @@ func (v *ValidateBlueprint) TestValidateNonDefaultVersion(c *C) { }, }, }, - err: IsNil, + err: check.IsNil, }, { // blueprint with one function that is registered with default version and @@ -282,7 +282,7 @@ func (v *ValidateBlueprint) TestValidateNonDefaultVersion(c *C) { }, }, }, - err: NotNil, + err: check.NotNil, errContains: "argument ndVersionArg23 is not supported", }, { @@ -306,7 +306,7 @@ func (v *ValidateBlueprint) TestValidateNonDefaultVersion(c *C) { }, }, }, - err: NotNil, + err: check.NotNil, errContains: "Required arg missing: ndVersionArg2", }, { @@ 
-331,20 +331,20 @@ func (v *ValidateBlueprint) TestValidateNonDefaultVersion(c *C) { }, }, }, - err: IsNil, + err: check.IsNil, }, } { bp := blueprint() bp.Actions["backup"].Phases = tc.backupPhases err := Do(bp, nonDefaultFuncVersion) if err != nil { - c.Assert(strings.Contains(err.Error(), tc.errContains), Equals, true) + c.Assert(strings.Contains(err.Error(), tc.errContains), check.Equals, true) } c.Assert(err, tc.err) } } -func (v *ValidateBlueprint) TestValidateAnnLabelArgs(c *C) { +func (v *ValidateBlueprint) TestValidateAnnLabelArgs(c *check.C) { for _, tc := range []struct { labels interface{} annotations interface{} @@ -441,14 +441,14 @@ func (v *ValidateBlueprint) TestValidateAnnLabelArgs(c *C) { } err := Do(bp, kanister.DefaultVersion) if tc.error != "" { - c.Assert(strings.Contains(err.Error(), tc.error), Equals, true) + c.Assert(strings.Contains(err.Error(), tc.error), check.Equals, true) } else { - c.Assert(err, Equals, nil) + c.Assert(err, check.Equals, nil) } } } -func (v *ValidateBlueprint) TestValidatePhaseNames(c *C) { +func (v *ValidateBlueprint) TestValidatePhaseNames(c *check.C) { for _, tc := range []BlueprintTest{ { backupPhases: []crv1alpha1.BlueprintPhase{ @@ -460,7 +460,7 @@ func (v *ValidateBlueprint) TestValidatePhaseNames(c *C) { {Name: "phasefour"}, {Name: "phasefive"}, }, - err: IsNil, + err: check.IsNil, deferPhase: &crv1alpha1.BlueprintPhase{ Name: "phasesix", }, @@ -476,7 +476,7 @@ func (v *ValidateBlueprint) TestValidatePhaseNames(c *C) { {Name: "phasefour"}, {Name: "phasefive"}, }, - err: NotNil, + err: check.NotNil, errContains: "Duplicated phase name is not allowed. Violating phase 'phaseone'", deferPhase: &crv1alpha1.BlueprintPhase{ Name: "phasesix", @@ -493,7 +493,7 @@ func (v *ValidateBlueprint) TestValidatePhaseNames(c *C) { {Name: "phaseone"}, {Name: "phasefive"}, }, - err: NotNil, + err: check.NotNil, errContains: "Duplicated phase name is not allowed. Violating phase 'phaseone'", deferPhase: &crv1alpha1.BlueprintPhase{ Name: "phasesix", @@ -510,7 +510,7 @@ func (v *ValidateBlueprint) TestValidatePhaseNames(c *C) { {Name: "phasefour"}, {Name: "phasefive"}, }, - err: NotNil, + err: check.NotNil, errContains: "Duplicated phase name is not allowed. Violating phase 'phaseone'", deferPhase: &crv1alpha1.BlueprintPhase{ Name: "phaseone", @@ -525,7 +525,7 @@ func (v *ValidateBlueprint) TestValidatePhaseNames(c *C) { } err := validatePhaseNames(bp) if err != nil { - c.Assert(strings.Contains(err.Error(), tc.errContains), Equals, true) + c.Assert(strings.Contains(err.Error(), tc.errContains), check.Equals, true) } c.Assert(err, tc.err) } diff --git a/pkg/caller/caller_test.go b/pkg/caller/caller_test.go index 22bcc3e1bc..3a835478a0 100644 --- a/pkg/caller/caller_test.go +++ b/pkg/caller/caller_test.go @@ -3,36 +3,36 @@ package caller import ( "testing" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" ) func Test(t *testing.T) { - TestingT(t) + check.TestingT(t) } type CallerSuite struct{} -var _ = Suite(&CallerSuite{}) +var _ = check.Suite(&CallerSuite{}) -func (s *CallerSuite) TestCallerFields(c *C) { +func (s *CallerSuite) TestCallerFields(c *check.C) { frame := GetFrame(0) // Check against the GetFrame() function - c.Assert(frame.Function, Equals, "github.com/kanisterio/kanister/pkg/caller.GetFrame") + c.Assert(frame.Function, check.Equals, "github.com/kanisterio/kanister/pkg/caller.GetFrame") // This breaks if the function is relocated or the file is renamed or the // this test is run independently - c.Assert(frame.File, Equals, "pkg/caller/frame.go") + c.Assert(frame.File, check.Equals, "pkg/caller/frame.go") frame = indirectCall(1) // Check against the indirectCall() function - c.Assert(frame.Function, Equals, "github.com/kanisterio/kanister/pkg/caller.indirectCall") + c.Assert(frame.Function, check.Equals, "github.com/kanisterio/kanister/pkg/caller.indirectCall") // This breaks if the function is relocated or the file is renamed - c.Assert(frame.File, Equals, "pkg/caller/caller_test.go") + c.Assert(frame.File, check.Equals, "pkg/caller/caller_test.go") frame = indirectCall(2) // Check against this function - c.Assert(frame.Function, Equals, "github.com/kanisterio/kanister/pkg/caller.(*CallerSuite).TestCallerFields") + c.Assert(frame.Function, check.Equals, "github.com/kanisterio/kanister/pkg/caller.(*CallerSuite).TestCallerFields") // This breaks if the function is relocated or the file is renamed - c.Assert(frame.File, Equals, "pkg/caller/caller_test.go") + c.Assert(frame.File, check.Equals, "pkg/caller/caller_test.go") } func indirectCall(depth int) Frame { diff --git a/pkg/chronicle/chronicle_push_test.go b/pkg/chronicle/chronicle_push_test.go index 0e7987512a..7180de03b1 100644 --- a/pkg/chronicle/chronicle_push_test.go +++ b/pkg/chronicle/chronicle_push_test.go @@ -19,7 +19,7 @@ import ( "os" "path/filepath" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/apimachinery/pkg/util/rand" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -29,9 +29,9 @@ import ( type ChroniclePushSuite struct{} -var _ = Suite(&ChroniclePushSuite{}) +var _ = check.Suite(&ChroniclePushSuite{}) -func (s *ChroniclePushSuite) TestPush(c *C) { +func (s *ChroniclePushSuite) TestPush(c *check.C) { osType := objectstore.ProviderTypeS3 loc := crv1alpha1.Location{ Type: crv1alpha1.LocationTypeS3Compliant, @@ -41,11 +41,11 @@ func (s *ChroniclePushSuite) TestPush(c *C) { prof := *testutil.ObjectStoreProfileOrSkip(c, osType, loc) pp := filepath.Join(c.MkDir(), "profile.json") err := writeProfile(pp, prof) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) a := filepath.Join(c.MkDir(), "artifact") err = os.WriteFile(a, []byte(rand.String(10)), os.ModePerm) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p := PushParams{ ProfilePath: pp, ArtifactFile: a, @@ -53,5 +53,5 @@ func (s *ChroniclePushSuite) TestPush(c *C) { } ctx := context.Background() err = push(ctx, p, 0) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } diff --git a/pkg/chronicle/chronicle_test.go b/pkg/chronicle/chronicle_test.go index 0339471f1e..07772e5f5a 100644 --- a/pkg/chronicle/chronicle_test.go +++ b/pkg/chronicle/chronicle_test.go @@ -24,7 +24,7 @@ import ( "strings" "testing" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/apimachinery/pkg/util/rand" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -34,15 +34,15 @@ import ( ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type ChronicleSuite struct { profile param.Profile } -var _ = Suite(&ChronicleSuite{}) +var _ = check.Suite(&ChronicleSuite{}) -func (s *ChronicleSuite) SetUpSuite(c *C) { +func (s *ChronicleSuite) SetUpSuite(c *check.C) { osType := objectstore.ProviderTypeS3 loc := crv1alpha1.Location{ Type: crv1alpha1.LocationTypeS3Compliant, @@ -52,15 +52,15 @@ func (s *ChronicleSuite) SetUpSuite(c *C) { s.profile = *testutil.ObjectStoreProfileOrSkip(c, osType, loc) } -func (s *ChronicleSuite) TestPushPull(c *C) { +func (s *ChronicleSuite) TestPushPull(c *check.C) { pp := filepath.Join(c.MkDir(), "profile.json") err := writeProfile(pp, s.profile) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) a := filepath.Join(c.MkDir(), "artifact") ap := rand.String(10) err = os.WriteFile(a, []byte(ap), os.ModePerm) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p := PushParams{ ProfilePath: pp, ArtifactFile: a, @@ -71,34 +71,34 @@ func (s *ChronicleSuite) TestPushPull(c *C) { // Write i to bucket p.Command = []string{"echo", strconv.Itoa(i)} err = push(ctx, p, i) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Pull and check that we still get i buf := bytes.NewBuffer(nil) c.Log("File: ", p.ArtifactFile) err = Pull(ctx, buf, s.profile, ap) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) str, err := io.ReadAll(buf) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Remove additional '\n' t := strings.TrimSuffix(string(str), "\n") - c.Assert(t, Equals, strconv.Itoa(i)) + c.Assert(t, check.Equals, strconv.Itoa(i)) } } -func (s *ChronicleSuite) TestEnv(c *C) { +func (s *ChronicleSuite) TestEnv(c *check.C) { ctx := context.Background() cmd := []string{"echo", "X:", "$X"} suffix := c.TestName() + rand.String(5) env := []string{"X=foo"} err := pushWithEnv(ctx, cmd, suffix, 0, s.profile, env) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) buf := bytes.NewBuffer(nil) err = Pull(ctx, buf, s.profile, suffix) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) str, err := io.ReadAll(buf) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) t := strings.TrimSuffix(string(str), "\n") - c.Assert(t, Equals, "X: foo") + c.Assert(t, check.Equals, "X: foo") } diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go index c6e1e1603e..b6878bad26 100644 --- a/pkg/consts/consts.go +++ b/pkg/consts/consts.go @@ -55,7 +55,7 @@ const ( const ( LatestKanisterToolsImage = "ghcr.io/kanisterio/kanister-tools:v9.99.9-dev" - KanisterToolsImage = "ghcr.io/kanisterio/kanister-tools:0.110.0" + KanisterToolsImage = "ghcr.io/kanisterio/kanister-tools:0.111.0" ) // KanisterToolsImageEnvName is used to set up a custom kanister-tools image diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index 2540240831..b946b3ce4a 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -24,7 +24,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" promgomodel "github.com/prometheus/client_model/go" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,7 +48,7 @@ import ( ) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type ControllerSuite struct { crCli crclientv1alpha1.CrV1alpha1Interface @@ -62,19 +62,19 @@ type ControllerSuite struct { ctrl *Controller } -var _ = Suite(&ControllerSuite{}) +var _ = check.Suite(&ControllerSuite{}) const ( testAction = "myAction" ) -func (s *ControllerSuite) SetUpSuite(c *C) { +func (s *ControllerSuite) SetUpSuite(c *check.C) { config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := kubernetes.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) crCli, err := crclientv1alpha1.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Make sure the CRD's exist. _ = resource.CreateCustomResources(context.Background(), config) @@ -91,69 +91,69 @@ func (s *ControllerSuite) SetUpSuite(c *C) { } ctx := context.Background() cns, err := s.cli.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name sec := testutil.NewTestProfileSecret() sec, err = s.cli.CoreV1().Secrets(s.namespace).Create(ctx, sec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p := testutil.NewTestProfile(s.namespace, sec.GetName()) _, err = s.crCli.Profiles(s.namespace).Create(ctx, p, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ss := testutil.NewTestStatefulSet(1) ss, err = s.cli.AppsV1().StatefulSets(s.namespace).Create(ctx, ss, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.ss = ss err = kube.WaitOnStatefulSetReady(ctx, s.cli, s.namespace, s.ss.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) d := testutil.NewTestDeployment(1) d, err = s.cli.AppsV1().Deployments(s.namespace).Create(ctx, d, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.deployment = d err = kube.WaitOnDeploymentReady(ctx, s.cli, s.namespace, s.deployment.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cm := testutil.NewTestConfigMap() cm, err = s.cli.CoreV1().ConfigMaps(s.namespace).Create(ctx, cm, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.confimap = cm } -func (s *ControllerSuite) TearDownSuite(c *C) { +func (s *ControllerSuite) TearDownSuite(c *check.C) { err := os.Unsetenv(kube.PodNSEnvVar) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } } -func (s *ControllerSuite) SetUpTest(c *C) { +func (s *ControllerSuite) SetUpTest(c *check.C) { config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctx := context.Background() ctx, cancel := context.WithCancel(ctx) testPrometheusRegistry := prometheus.NewRegistry() s.ctrl = New(config, testPrometheusRegistry) err = s.ctrl.StartWatch(ctx, s.namespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cancel = cancel } -func (s *ControllerSuite) TearDownTest(c *C) { +func (s *ControllerSuite) TearDownTest(c *check.C) { s.cancel() } -func (s *ControllerSuite) TestWatch(c *C) { +func (s *ControllerSuite) TestWatch(c *check.C) { // We give it a few seconds complete it's scan. This isn't required for the // test, but is a more realistic startup scenario. 
time.Sleep(5 * time.Second) } -func (s *ControllerSuite) waitOnActionSetState(c *C, as *crv1alpha1.ActionSet, state crv1alpha1.State) error { +func (s *ControllerSuite) waitOnActionSetState(c *check.C, as *crv1alpha1.ActionSet, state crv1alpha1.State) error { ctx, cancel := context.WithTimeout(context.Background(), 40*time.Second) defer cancel() err := poll.Wait(ctx, func(context.Context) (bool, error) { @@ -179,7 +179,7 @@ func (s *ControllerSuite) waitOnActionSetState(c *C, as *crv1alpha1.ActionSet, s return errors.Wrapf(err, "State '%s' never reached", state) } -func (s *ControllerSuite) waitOnDeferPhaseState(c *C, as *crv1alpha1.ActionSet, state crv1alpha1.State) error { +func (s *ControllerSuite) waitOnDeferPhaseState(c *check.C, as *crv1alpha1.ActionSet, state crv1alpha1.State) error { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() err := poll.Wait(ctx, func(ctx context.Context) (bool, error) { @@ -458,7 +458,7 @@ func getCounterVecValue(metric prometheus.CounterVec, metricLabels []string) flo return m.Counter.GetValue() } -func (s *ControllerSuite) TestEmptyActionSetStatus(c *C) { +func (s *ControllerSuite) TestEmptyActionSetStatus(c *check.C) { as := &crv1alpha1.ActionSet{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "testactionset-", @@ -468,17 +468,17 @@ func (s *ControllerSuite) TestEmptyActionSetStatus(c *C) { }, } as, err := s.crCli.ActionSets(s.namespace).Create(context.TODO(), as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err := s.crCli.ActionSets(s.namespace).Delete(context.TODO(), as.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() err = s.waitOnActionSetState(c, as, crv1alpha1.StateComplete) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *ControllerSuite) TestSynchronousFailure(c *C) { +func (s *ControllerSuite) TestSynchronousFailure(c *check.C) { as := &crv1alpha1.ActionSet{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "testactionset-", @@ -496,26 +496,26 @@ func (s *ControllerSuite) TestSynchronousFailure(c *C) { }, } as, err := s.crCli.ActionSets(s.namespace).Create(context.TODO(), as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err := s.crCli.ActionSets(s.namespace).Delete(context.TODO(), as.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() err = s.waitOnActionSetState(c, as, crv1alpha1.StateFailed) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *ControllerSuite) TestNilPrometheusRegistry(c *C) { +func (s *ControllerSuite) TestNilPrometheusRegistry(c *check.C) { config, err := kube.LoadConfig() - c.Assert(err, IsNil) - c.Assert(config, NotNil) + c.Assert(err, check.IsNil) + c.Assert(config, check.NotNil) ctrl := New(config, nil) - c.Assert(ctrl, NotNil) - c.Assert(ctrl.metrics, IsNil) + c.Assert(ctrl, check.NotNil) + c.Assert(ctrl.metrics, check.IsNil) } -func (s *ControllerSuite) TestExecActionSet(c *C) { +func (s *ControllerSuite) TestExecActionSet(c *check.C) { for _, pok := range []string{"StatefulSet", "Deployment"} { for _, tc := range []struct { funcNames []string @@ -615,7 +615,7 @@ func (s *ControllerSuite) TestExecActionSet(c *C) { bp = testutil.BlueprintWithConfigMap(bp) ctx := context.Background() bp, err = s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) oldValue := getCounterVecValue(s.ctrl.metrics.actionSetResolutionCounterVec, 
[]string{tc.metricResolution}) @@ -633,7 +633,7 @@ func (s *ControllerSuite) TestExecActionSet(c *C) { as := testutil.NewTestActionSet(s.namespace, bp.GetName(), pok, n, s.namespace, tc.version, testAction) as = testutil.ActionSetWithConfigMap(as, s.confimap.GetName()) as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil, Commentf("Failed case: %s", tc.name)) + c.Assert(err, check.IsNil, check.Commentf("Failed case: %s", tc.name)) final := crv1alpha1.StateComplete cancel := false @@ -642,40 +642,40 @@ func (s *ControllerSuite) TestExecActionSet(c *C) { switch fn { case testutil.FailFuncName: final = crv1alpha1.StateFailed - c.Assert(testutil.FailFuncError().Error(), DeepEquals, "Kanister function failed", Commentf("Failed case: %s", tc.name)) + c.Assert(testutil.FailFuncError().Error(), check.DeepEquals, "Kanister function failed", check.Commentf("Failed case: %s", tc.name)) break Loop case testutil.WaitFuncName: testutil.ReleaseWaitFunc() case testutil.ArgFuncName: - c.Assert(testutil.ArgFuncArgs(), DeepEquals, map[string]interface{}{"key": "myValue"}, Commentf("Failed case: %s", tc.name)) + c.Assert(testutil.ArgFuncArgs(), check.DeepEquals, map[string]interface{}{"key": "myValue"}, check.Commentf("Failed case: %s", tc.name)) case testutil.OutputFuncName: - c.Assert(testutil.OutputFuncOut(), DeepEquals, map[string]interface{}{"key": "myValue"}, Commentf("Failed case: %s", tc.name)) + c.Assert(testutil.OutputFuncOut(), check.DeepEquals, map[string]interface{}{"key": "myValue"}, check.Commentf("Failed case: %s", tc.name)) case testutil.CancelFuncName: testutil.CancelFuncStarted() err = s.crCli.ActionSets(s.namespace).Delete(context.TODO(), as.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) - c.Assert(testutil.CancelFuncOut().Error(), DeepEquals, "context canceled") + c.Assert(err, check.IsNil) + c.Assert(testutil.CancelFuncOut().Error(), check.DeepEquals, "context canceled") cancel = true case testutil.VersionMismatchFuncName: final = crv1alpha1.StateFailed - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } if !cancel { err = s.waitOnActionSetState(c, as, final) - c.Assert(err, IsNil, Commentf("Failed case: %s", tc.name)) + c.Assert(err, check.IsNil, check.Commentf("Failed case: %s", tc.name)) expectedValue := oldValue + 1 err = waitForMetrics(s.ctrl.metrics.actionSetResolutionCounterVec, []string{tc.metricResolution}, expectedValue, time.Second) - c.Assert(err, IsNil, Commentf("Failed case: %s, failed waiting for metric update to %v", tc.name, expectedValue)) + c.Assert(err, check.IsNil, check.Commentf("Failed case: %s, failed waiting for metric update to %v", tc.name, expectedValue)) } err = s.crCli.Blueprints(s.namespace).Delete(context.TODO(), bp.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.crCli.ActionSets(s.namespace).Delete(context.TODO(), as.GetName(), metav1.DeleteOptions{}) if !cancel { - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } else { - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } } } @@ -697,7 +697,7 @@ func waitForMetrics(metrics prometheus.CounterVec, labels []string, expected flo return err } -func (s *ControllerSuite) TestRuntimeObjEventLogs(c *C) { +func (s *ControllerSuite) TestRuntimeObjEventLogs(c *check.C) { c.Skip("This may not work in MiniKube") ctx := context.Background() // Create ActionSet @@ -714,7 +714,7 @@ func (s *ControllerSuite) TestRuntimeObjEventLogs(c *C) { }, } as, err := s.crCli.ActionSets(s.namespace).Create(ctx, as, 
metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) msg := "Unit testing event logs" reason := "Test Logs" @@ -724,85 +724,85 @@ func (s *ControllerSuite) TestRuntimeObjEventLogs(c *C) { // Create Blueprint bp := testutil.NewTestBlueprint("StatefulSet", testutil.WaitFuncName) bp, err = s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Test the logAndErrorEvent function ctx = field.Context(ctx, consts.ActionsetNameKey, as.GetName()) config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctlr := New(config, nil) ctlr.logAndErrorEvent(ctx, msg, reason, errors.New("Testing Event Logs"), as, nilAs, bp) // Test ActionSet error event logging events, err := s.cli.CoreV1().Events(as.Namespace).Search(scheme.Scheme, as) - c.Assert(err, IsNil) - c.Assert(events, NotNil) - c.Assert(len(events.Items) > 0, Equals, true) - c.Assert(events.Items[0].Message, Equals, msg) + c.Assert(err, check.IsNil) + c.Assert(events, check.NotNil) + c.Assert(len(events.Items) > 0, check.Equals, true) + c.Assert(events.Items[0].Message, check.Equals, msg) // Testing nil ActionSet error event logging events, err = s.cli.CoreV1().Events(as.Namespace).Search(scheme.Scheme, nilAs) - c.Assert(err, NotNil) - c.Assert(len(events.Items), Equals, 0) + c.Assert(err, check.NotNil) + c.Assert(len(events.Items), check.Equals, 0) // Testing Blueprint error event logging events, err = s.cli.CoreV1().Events(bp.Namespace).Search(scheme.Scheme, bp) - c.Assert(err, IsNil) - c.Assert(events, NotNil) - c.Assert(len(events.Items) > 0, Equals, true) - c.Assert(events.Items[0].Message, Equals, msg) + c.Assert(err, check.IsNil) + c.Assert(events, check.NotNil) + c.Assert(len(events.Items) > 0, check.Equals, true) + c.Assert(events.Items[0].Message, check.Equals, msg) // Testing empty Blueprint testbp := &crv1alpha1.Blueprint{} ctlr.logAndErrorEvent(ctx, msg, reason, errors.New("Testing Event Logs"), testbp) events, err = s.cli.CoreV1().Events(bp.Namespace).Search(scheme.Scheme, testbp) - c.Assert(err, NotNil) - c.Assert(len(events.Items), Equals, 0) + c.Assert(err, check.NotNil) + c.Assert(len(events.Items), check.Equals, 0) } -func (s *ControllerSuite) TestDeferPhase(c *C) { +func (s *ControllerSuite) TestDeferPhase(c *check.C) { err := os.Setenv(kube.PodNSEnvVar, "test") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctx := context.Background() bp := newBPWithDeferPhase() bp, err = s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // create backup actionset and wait for it to be completed as := testutil.NewTestActionSet(s.namespace, bp.GetName(), "Deployment", s.deployment.GetName(), s.namespace, kanister.DefaultVersion, "backup") as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateRunning) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // make sure deferPhase is also run successfully err = s.waitOnDeferPhaseState(c, as, crv1alpha1.StateComplete) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateComplete) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) as, err = s.crCli.ActionSets(s.namespace).Get(ctx, as.Name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // create restore actionset and wait for it to be completed // 
if it's completed, simply means artifacts are rendered correctly ras := testutil.NewTestRestoreActionSet(s.namespace, bp.GetName(), s.deployment.GetName(), as.Status.Actions[0].Artifacts) ras, err = s.crCli.ActionSets(s.namespace).Create(ctx, ras, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, ras, crv1alpha1.StateRunning) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, ras, crv1alpha1.StateComplete) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) as, err = s.crCli.ActionSets(s.namespace).Get(ctx, as.Name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Assert(as.Status.Actions[0].Artifacts["mainPhaseOutputOne"].KeyValue, DeepEquals, map[string]string{"op": "mainValue"}) - c.Assert(as.Status.Actions[0].Artifacts["mainPhaseOutputTwo"].KeyValue, DeepEquals, map[string]string{"op": "mainValueTwo"}) - c.Assert(as.Status.Actions[0].Artifacts["deferPhaseOutput"].KeyValue, DeepEquals, map[string]string{"op": "deferValue"}) + c.Assert(as.Status.Actions[0].Artifacts["mainPhaseOutputOne"].KeyValue, check.DeepEquals, map[string]string{"op": "mainValue"}) + c.Assert(as.Status.Actions[0].Artifacts["mainPhaseOutputTwo"].KeyValue, check.DeepEquals, map[string]string{"op": "mainValueTwo"}) + c.Assert(as.Status.Actions[0].Artifacts["deferPhaseOutput"].KeyValue, check.DeepEquals, map[string]string{"op": "deferValue"}) } // TestDeferPhaseCoreErr tests a blueprint with multiple main phases and deferPhase @@ -811,189 +811,189 @@ func (s *ControllerSuite) TestDeferPhase(c *C) { // 2. DeferPhase is run successfully and status is complete // 3. Phases have correct state in actionset status // 4. We don't render output artifacts if any of the phases failed -func (s *ControllerSuite) TestDeferPhaseCoreErr(c *C) { +func (s *ControllerSuite) TestDeferPhaseCoreErr(c *check.C) { err := os.Setenv(kube.PodNSEnvVar, "test") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctx := context.Background() bp := newBPWithDeferPhaseAndErrInCorePhase() bp, err = s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) as := testutil.NewTestActionSet(s.namespace, bp.GetName(), "Deployment", s.deployment.GetName(), s.namespace, kanister.DefaultVersion, "backup") as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateRunning) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // wait for deferPhase to be completed, because actionset status would be set to failed as soon as a main phase fails err = s.waitOnDeferPhaseState(c, as, crv1alpha1.StateComplete) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateFailed) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // get the actionset again to have updated status as, err = s.crCli.ActionSets(s.namespace).Get(ctx, as.Name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // make sure the phases that errored have state to be se as failed in actionset status // since we just have backup action, we are using 0th index here - c.Assert(as.Status.Actions[0].Phases[0].State, Equals, crv1alpha1.StateComplete) - c.Assert(as.Status.Actions[0].Phases[1].State, Equals, crv1alpha1.StateFailed) - c.Assert(as.Status.Actions[0].DeferPhase.State, Equals, crv1alpha1.StateComplete) + 
c.Assert(as.Status.Actions[0].Phases[0].State, check.Equals, crv1alpha1.StateComplete) + c.Assert(as.Status.Actions[0].Phases[1].State, check.Equals, crv1alpha1.StateFailed) + c.Assert(as.Status.Actions[0].DeferPhase.State, check.Equals, crv1alpha1.StateComplete) // we don't render template if any of the core phases or defer phases failed } -func (s *ControllerSuite) TestDeferPhaseDeferErr(c *C) { +func (s *ControllerSuite) TestDeferPhaseDeferErr(c *check.C) { err := os.Setenv(kube.PodNSEnvVar, "test") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctx := context.Background() bp := newBPWithDeferPhaseAndErrInDeferPhase() bp, err = s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) as := testutil.NewTestActionSet(s.namespace, bp.GetName(), "Deployment", s.deployment.GetName(), s.namespace, kanister.DefaultVersion, "backup") as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateRunning) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // wait for deferPhase to fail err = s.waitOnDeferPhaseState(c, as, crv1alpha1.StateFailed) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateFailed) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // get the actionset again to have updated status as, err = s.crCli.ActionSets(s.namespace).Get(ctx, as.Name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // make sure the phases that errored have state set as failed in actionset status // since we just have backup action, we are using 0th index here - c.Assert(as.Status.Actions[0].Phases[0].State, Equals, crv1alpha1.StateComplete) - c.Assert(as.Status.Actions[0].Phases[1].State, Equals, crv1alpha1.StateComplete) - c.Assert(as.Status.Actions[0].DeferPhase.State, Equals, crv1alpha1.StateFailed) + c.Assert(as.Status.Actions[0].Phases[0].State, check.Equals, crv1alpha1.StateComplete) + c.Assert(as.Status.Actions[0].Phases[1].State, check.Equals, crv1alpha1.StateComplete) + c.Assert(as.Status.Actions[0].DeferPhase.State, check.Equals, crv1alpha1.StateFailed) } -func (s *ControllerSuite) TestPhaseOutputAsArtifact(c *C) { +func (s *ControllerSuite) TestPhaseOutputAsArtifact(c *check.C) { ctx := context.Background() // Create a blueprint that uses func output as artifact bp := newBPWithOutputArtifact() bp = testutil.BlueprintWithConfigMap(bp) bp, err := s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Add an actionset that references that blueprint. 
as := testutil.NewTestActionSet(s.namespace, bp.GetName(), "Deployment", s.deployment.GetName(), s.namespace, kanister.DefaultVersion, testAction) as = testutil.ActionSetWithConfigMap(as, s.confimap.GetName()) as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateRunning) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check if the func returned expected output - c.Assert(testutil.OutputFuncOut(), DeepEquals, map[string]interface{}{"key": "myValue"}) + c.Assert(testutil.OutputFuncOut(), check.DeepEquals, map[string]interface{}{"key": "myValue"}) err = s.waitOnActionSetState(c, as, crv1alpha1.StateComplete) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check if the artifacts got updated correctly as, _ = s.crCli.ActionSets(as.GetNamespace()).Get(ctx, as.GetName(), metav1.GetOptions{}) arts := as.Status.Actions[0].Artifacts - c.Assert(arts, NotNil) - c.Assert(arts, HasLen, 1) + c.Assert(arts, check.NotNil) + c.Assert(arts, check.HasLen, 1) keyVal := arts["myArt"].KeyValue - c.Assert(keyVal, DeepEquals, map[string]string{"key": "myValue"}) + c.Assert(keyVal, check.DeepEquals, map[string]string{"key": "myValue"}) } -func (s *ControllerSuite) TestPhaseOutputParallelActions(c *C) { +func (s *ControllerSuite) TestPhaseOutputParallelActions(c *check.C) { ctx := context.Background() // Create a blueprint that uses func output as artifact bp := newBPWithOutputArtifact() bp = testutil.BlueprintWithConfigMap(bp) bp, err := s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Create another blueprint bp1 := testutil.NewTestBlueprint("Deployment", testutil.WaitFuncName) bp1, err = s.crCli.Blueprints(s.namespace).Create(ctx, bp1, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Add an actionset that runs actions from two blueprints in parallel as := testutil.NewTestMultiActionActionSet(s.namespace, bp1.GetName(), testAction, bp.GetName(), testAction, "Deployment", s.deployment.GetName(), s.namespace, kanister.DefaultVersion) as = testutil.ActionSetWithConfigMap(as, s.confimap.GetName()) as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateRunning) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check if the func returned expected output - c.Assert(testutil.OutputFuncOut(), DeepEquals, map[string]interface{}{"key": "myValue"}) + c.Assert(testutil.OutputFuncOut(), check.DeepEquals, map[string]interface{}{"key": "myValue"}) testutil.ReleaseWaitFunc() err = s.waitOnActionSetState(c, as, crv1alpha1.StateComplete) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check if the artifacts got updated correctly as, _ = s.crCli.ActionSets(as.GetNamespace()).Get(ctx, as.GetName(), metav1.GetOptions{}) arts := as.Status.Actions[0].Artifacts - c.Assert(arts, IsNil) + c.Assert(arts, check.IsNil) arts = as.Status.Actions[1].Artifacts - c.Assert(arts, NotNil) - c.Assert(arts, HasLen, 1) + c.Assert(arts, check.NotNil) + c.Assert(arts, check.HasLen, 1) keyVal := arts["myArt"].KeyValue - c.Assert(keyVal, DeepEquals, map[string]string{"key": "myValue"}) + c.Assert(keyVal, check.DeepEquals, map[string]string{"key": "myValue"}) } -func (s *ControllerSuite) TestPhaseOutputAsKopiaSnapshot(c *C) { +func (s *ControllerSuite) 
TestPhaseOutputAsKopiaSnapshot(c *check.C) { ctx := context.Background() // Create a blueprint that uses func output as kopia snapshot bp := newBPWithKopiaSnapshotOutputArtifact() bp = testutil.BlueprintWithConfigMap(bp) bp, err := s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Add an actionset that references that blueprint. as := testutil.NewTestActionSet(s.namespace, bp.GetName(), "Deployment", s.deployment.GetName(), s.namespace, kanister.DefaultVersion, testAction) as = testutil.ActionSetWithConfigMap(as, s.confimap.GetName()) as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateRunning) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check if the func returned expected output - c.Assert(testutil.OutputFuncOut(), DeepEquals, map[string]interface{}{"key": "myValue"}) + c.Assert(testutil.OutputFuncOut(), check.DeepEquals, map[string]interface{}{"key": "myValue"}) err = s.waitOnActionSetState(c, as, crv1alpha1.StateComplete) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check if the artifacts got updated correctly as, _ = s.crCli.ActionSets(as.GetNamespace()).Get(ctx, as.GetName(), metav1.GetOptions{}) arts := as.Status.Actions[0].Artifacts - c.Assert(arts, NotNil) - c.Assert(arts, HasLen, 1) + c.Assert(arts, check.NotNil) + c.Assert(arts, check.HasLen, 1) kopiaSnapshot := arts["myArt"].KopiaSnapshot - c.Assert(kopiaSnapshot, Equals, "myValue") + c.Assert(kopiaSnapshot, check.Equals, "myValue") } -func (s *ControllerSuite) TestActionSetExecWithoutProfile(c *C) { +func (s *ControllerSuite) TestActionSetExecWithoutProfile(c *check.C) { ctx := context.Background() // Create a blueprint that uses func output as artifact bp := newBPWithOutputArtifact() bp = testutil.BlueprintWithConfigMap(bp) bp, err := s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Add an actionset that references that blueprint. 
as := &crv1alpha1.ActionSet{ @@ -1018,73 +1018,73 @@ func (s *ControllerSuite) TestActionSetExecWithoutProfile(c *C) { } as = testutil.ActionSetWithConfigMap(as, s.confimap.GetName()) as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateRunning) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check if the func returned expected output - c.Assert(testutil.OutputFuncOut(), DeepEquals, map[string]interface{}{"key": "myValue"}) + c.Assert(testutil.OutputFuncOut(), check.DeepEquals, map[string]interface{}{"key": "myValue"}) err = s.waitOnActionSetState(c, as, crv1alpha1.StateComplete) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check if the artifacts got updated correctly as, _ = s.crCli.ActionSets(as.GetNamespace()).Get(ctx, as.GetName(), metav1.GetOptions{}) arts := as.Status.Actions[0].Artifacts - c.Assert(arts, NotNil) - c.Assert(arts, HasLen, 1) + c.Assert(arts, check.NotNil) + c.Assert(arts, check.HasLen, 1) keyVal := arts["myArt"].KeyValue - c.Assert(keyVal, DeepEquals, map[string]string{"key": "myValue"}) + c.Assert(keyVal, check.DeepEquals, map[string]string{"key": "myValue"}) } -func (s *ControllerSuite) TestRenderArtifactsFailure(c *C) { +func (s *ControllerSuite) TestRenderArtifactsFailure(c *check.C) { ctx := context.Background() bp := newBPWithFakeOutputArtifact() bp = testutil.BlueprintWithConfigMap(bp) bp, err := s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Add an actionset that references that blueprint. as := testutil.NewTestActionSet(s.namespace, bp.GetName(), "Deployment", s.deployment.GetName(), s.namespace, kanister.DefaultVersion, testAction) as = testutil.ActionSetWithConfigMap(as, s.confimap.GetName()) as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateRunning) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Assert(testutil.OutputFuncOut(), DeepEquals, map[string]interface{}{"key": "myValue"}) + c.Assert(testutil.OutputFuncOut(), check.DeepEquals, map[string]interface{}{"key": "myValue"}) err = s.waitOnActionSetState(c, as, crv1alpha1.StateFailed) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *ControllerSuite) TestProgressRunningPhase(c *C) { +func (s *ControllerSuite) TestProgressRunningPhase(c *check.C) { err := os.Setenv(kube.PodNSEnvVar, "test") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctx := context.Background() bp := newBPForProgressRunningPhase() bp, err = s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // create actionset and wait for it to reach Running state as := testutil.NewTestActionSet(s.namespace, bp.GetName(), "Deployment", s.deployment.GetName(), s.namespace, kanister.DefaultVersion, "backup") as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitOnActionSetState(c, as, crv1alpha1.StateRunning) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) runningPhases := sets.Set[string]{} runningPhases.Insert("backupPhaseOne").Insert("backupPhaseTwo").Insert("deferPhase") err = s.waitOnActionSetCompleteWithRunningPhases(as, &runningPhases) - c.Assert(err, IsNil) - c.Assert(runningPhases, HasLen, 0) + 
c.Assert(err, check.IsNil) + c.Assert(runningPhases, check.HasLen, 0) } -func (s *ControllerSuite) TestGetActionTypeBucket(c *C) { +func (s *ControllerSuite) TestGetActionTypeBucket(c *check.C) { for _, tc := range []struct { actionType string }{ @@ -1133,33 +1133,33 @@ func (s *ControllerSuite) TestGetActionTypeBucket(c *C) { } { switch tc.actionType { case ActionTypeBackup: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypeBackup) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypeBackup) case ActionTypeRestore: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypeRestore) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypeRestore) case ActionTypeDelete: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypeDelete) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypeDelete) case ActionTypeBackupToServer: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypeBackupToServer) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypeBackupToServer) case ActionTypeRestoreFromServer: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypeRestoreFromServer) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypeRestoreFromServer) case ActionTypeBeforeBackup: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypeBeforeBackup) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypeBeforeBackup) case ActionTypeOnSuccess: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypeOnSuccess) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypeOnSuccess) case ActionTypeOnFailure: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypeOnFailure) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypeOnFailure) case ActionTypePreRestore: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypePreRestore) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypePreRestore) case ActionTypePostRestore: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypePostRestore) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypePostRestore) case ActionTypePostRestoreFailed: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypePostRestoreFailed) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypePostRestoreFailed) case ActionTypeBackupPrehook: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypeBackupPrehook) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypeBackupPrehook) case ActionTypeBackupPosthook: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypeBackupPosthook) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypeBackupPosthook) default: - c.Assert(getActionTypeBucket(tc.actionType), Equals, ActionTypeBackupOther) + c.Assert(getActionTypeBucket(tc.actionType), check.Equals, ActionTypeBackupOther) } } } diff --git a/pkg/controllers/repositoryserver/repository_test.go b/pkg/controllers/repositoryserver/repository_test.go index 080590cbe3..e5fffa5aa9 100644 --- a/pkg/controllers/repositoryserver/repository_test.go +++ b/pkg/controllers/repositoryserver/repository_test.go @@ -15,7 +15,7 @@ package repositoryserver import ( - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "sigs.k8s.io/controller-runtime/pkg/reconcile" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -23,7 +23,7 @@ import ( "github.com/kanisterio/kanister/pkg/testutil" ) -func (s *RepoServerControllerSuite) TestCacheSizeConfiguration(c *C) { +func (s *RepoServerControllerSuite) TestCacheSizeConfiguration(c *check.C) { repositoryServer := testutil.GetTestKopiaRepositoryServerCR(s.repoServerControllerNamespace) setRepositoryServerSecretsInCR(&s.repoServerSecrets, &repositoryServer) @@ -38,8 +38,8 @@ func (s *RepoServerControllerSuite) TestCacheSizeConfiguration(c *C) { // Test if Default cache size settings are set cacheSizeSettings := repoServerHandler.getRepositoryCacheSettings() - c.Assert(*cacheSizeSettings.Content, Equals, defaultcontentCacheMB) - c.Assert(*cacheSizeSettings.Metadata, Equals, defaultmetadataCacheMB) + c.Assert(*cacheSizeSettings.Content, check.Equals, defaultcontentCacheMB) + c.Assert(*cacheSizeSettings.Metadata, check.Equals, defaultmetadataCacheMB) customCacheMetadataSize := 1000 customCacheContentSize := 1100 @@ -49,8 +49,8 @@ func (s *RepoServerControllerSuite) TestCacheSizeConfiguration(c *C) { Content: &customCacheContentSize, } cacheSizeSettings = repoServerHandler.getRepositoryCacheSettings() - c.Assert(*cacheSizeSettings.Content, Equals, 1100) - c.Assert(*cacheSizeSettings.Metadata, Equals, 1000) + c.Assert(*cacheSizeSettings.Content, check.Equals, 1100) + c.Assert(*cacheSizeSettings.Metadata, check.Equals, 1000) // Check if default Content Cache size is set repositoryServer.Spec.Repository.CacheSizeSettings = crv1alpha1.CacheSizeSettings{ @@ -58,8 +58,8 @@ func (s *RepoServerControllerSuite) TestCacheSizeConfiguration(c *C) { Content: nil, } cacheSizeSettings = repoServerHandler.getRepositoryCacheSettings() - c.Assert(*cacheSizeSettings.Content, Equals, defaultcontentCacheMB) - c.Assert(*cacheSizeSettings.Metadata, Equals, 1000) + c.Assert(*cacheSizeSettings.Content, check.Equals, defaultcontentCacheMB) + c.Assert(*cacheSizeSettings.Metadata, check.Equals, 1000) // Check if default Metadata Cache size is set repositoryServer.Spec.Repository.CacheSizeSettings = crv1alpha1.CacheSizeSettings{ @@ -67,11 +67,11 @@ func (s *RepoServerControllerSuite) TestCacheSizeConfiguration(c *C) { Content: &customCacheContentSize, } cacheSizeSettings = repoServerHandler.getRepositoryCacheSettings() - c.Assert(*cacheSizeSettings.Content, Equals, 1100) - c.Assert(*cacheSizeSettings.Metadata, Equals, defaultmetadataCacheMB) + c.Assert(*cacheSizeSettings.Content, check.Equals, 1100) + c.Assert(*cacheSizeSettings.Metadata, check.Equals, defaultmetadataCacheMB) } -func (s *RepoServerControllerSuite) TestConfigFileAndLogDirectoryConfiguration(c *C) { +func (s *RepoServerControllerSuite) TestConfigFileAndLogDirectoryConfiguration(c *check.C) { repositoryServer := testutil.GetTestKopiaRepositoryServerCR(s.repoServerControllerNamespace) setRepositoryServerSecretsInCR(&s.repoServerSecrets, &repositoryServer) @@ -84,9 +84,9 @@ func (s *RepoServerControllerSuite) TestConfigFileAndLogDirectoryConfiguration(c // Check if default values for log directory,config file path and cache directory are set configuration := repoServerHandler.getRepositoryConfiguration() - c.Assert(configuration.ConfigFilePath, Equals, command.DefaultConfigFilePath) - c.Assert(configuration.LogDirectory, Equals, command.DefaultLogDirectory) - c.Assert(configuration.CacheDirectory, Equals, command.DefaultCacheDirectory) + c.Assert(configuration.ConfigFilePath, 
check.Equals, command.DefaultConfigFilePath) + c.Assert(configuration.LogDirectory, check.Equals, command.DefaultLogDirectory) + c.Assert(configuration.CacheDirectory, check.Equals, command.DefaultCacheDirectory) // Check if custom values for log directory,config file path and cache directory are set repositoryServer.Spec.Repository.Configuration.ConfigFilePath = "/tmp/test-config" @@ -94,7 +94,7 @@ func (s *RepoServerControllerSuite) TestConfigFileAndLogDirectoryConfiguration(c repositoryServer.Spec.Repository.Configuration.CacheDirectory = "/tmp/test-cache-directory" configuration = repoServerHandler.getRepositoryConfiguration() - c.Assert(configuration.ConfigFilePath, Equals, "/tmp/test-config") - c.Assert(configuration.LogDirectory, Equals, "/tmp/test-log-directory") - c.Assert(configuration.CacheDirectory, Equals, "/tmp/test-cache-directory") + c.Assert(configuration.ConfigFilePath, check.Equals, "/tmp/test-config") + c.Assert(configuration.LogDirectory, check.Equals, "/tmp/test-log-directory") + c.Assert(configuration.CacheDirectory, check.Equals, "/tmp/test-cache-directory") } diff --git a/pkg/controllers/repositoryserver/repositoryserver_controller_test.go b/pkg/controllers/repositoryserver/repositoryserver_controller_test.go index 1d9e10ec1b..67832d59ec 100644 --- a/pkg/controllers/repositoryserver/repositoryserver_controller_test.go +++ b/pkg/controllers/repositoryserver/repositoryserver_controller_test.go @@ -24,7 +24,7 @@ import ( "time" "github.com/pkg/errors" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" k8sresource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -56,7 +56,7 @@ const ( ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type RepoServerControllerSuite struct { crCli crclientv1alpha1.CrV1alpha1Interface @@ -75,29 +75,29 @@ type patchStringValue struct { Value string `json:"value"` } -var _ = Suite(&RepoServerControllerSuite{}) +var _ = check.Suite(&RepoServerControllerSuite{}) -func (s *RepoServerControllerSuite) SetUpSuite(c *C) { +func (s *RepoServerControllerSuite) SetUpSuite(c *check.C) { config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.k8sServerVersion, err = discoveryClient.ServerVersion() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := kubernetes.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) crCli, err := crclientv1alpha1.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctx, cancel := context.WithCancel(context.TODO()) // Make sure the CRDs exist. 
err = resource.CreateCustomResources(ctx, config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = resource.CreateRepoServerCustomResource(ctx, config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.kubeCli = cli s.crCli = crCli @@ -112,14 +112,14 @@ func (s *RepoServerControllerSuite) SetUpSuite(c *C) { }, } cns, err := s.kubeCli.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.repoServerControllerNamespace = cns.Name ws := webhook.NewServer(webhook.Options{Port: 9443}) // Since we are not creating the controller in a pod // the repository server controller needs few env variables set explicitly err = os.Setenv("POD_NAMESPACE", s.repoServerControllerNamespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) mgr, err := ctrl.NewManager(config, ctrl.Options{ Scheme: scheme, @@ -127,7 +127,7 @@ func (s *RepoServerControllerSuite) SetUpSuite(c *C) { Metrics: server.Options{BindAddress: "0"}, LeaderElection: false, }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) repoReconciler := &RepositoryServerReconciler{ Client: mgr.GetClient(), @@ -135,28 +135,28 @@ func (s *RepoServerControllerSuite) SetUpSuite(c *C) { } err = repoReconciler.SetupWithManager(mgr) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Since the manager is not started inside a pod, // the controller needs a pod reference to start successfully podSpec := getTestKanisterToolsPod(controllerPodName) _, err = cli.CoreV1().Pods(s.repoServerControllerNamespace).Create(ctx, podSpec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitForPodReady(ctx, s.kubeCli, s.repoServerControllerNamespace, controllerPodName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) go func(ctx context.Context) { // Env setup required to start the controller service // We need to set this up since we are not creating controller in a pod err := os.Setenv("HOSTNAME", controllerPodName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = os.Setenv("POD_SERVICE_ACCOUNT", defaultServiceAccount) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Set KANISTER_TOOLS env to override and use dev image err = os.Setenv(consts.KanisterToolsImageEnvName, consts.LatestKanisterToolsImage) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = mgr.Start(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }(ctx) s.DefaultRepoServerReconciler = repoReconciler @@ -164,28 +164,28 @@ func (s *RepoServerControllerSuite) SetUpSuite(c *C) { s.createRepositoryServerSecrets(c) } -func (s *RepoServerControllerSuite) createRepositoryServerSecrets(c *C) { +func (s *RepoServerControllerSuite) createRepositoryServerSecrets(c *check.C) { kopiaTLSSecretData, err := testutil.GetKopiaTLSSecretData() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.repoServerSecrets = repositoryServerSecrets{} s.repoServerSecrets.serverUserAccess, err = s.CreateRepositoryServerUserAccessSecret(testutil.GetRepoServerUserAccessSecretData("localhost", testutil.KopiaRepositoryServerAccessPassword)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.repoServerSecrets.serverAdmin, err = s.CreateRepositoryServerAdminSecret(testutil.GetRepoServerAdminSecretData(testutil.KopiaRepositoryServerAdminUser, testutil.KopiaRepositoryServerAdminPassword)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.repoServerSecrets.repositoryPassword, err = s.CreateRepositoryPasswordSecret(testutil.GetRepoPasswordSecretData(testutil.KopiaRepositoryPassword)) - 
c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.repoServerSecrets.serverTLS, err = s.CreateKopiaTLSSecret(kopiaTLSSecretData) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.repoServerSecrets.storage, err = s.CreateStorageLocationSecret(testutil.GetDefaultS3CompliantStorageLocation()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.repoServerSecrets.storageCredentials, err = s.CreateAWSStorageCredentialsSecret(testutil.GetDefaultS3StorageCreds(c)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } func (s *RepoServerControllerSuite) CreateRepositoryServerAdminSecret(data map[string][]byte) (se *corev1.Secret, err error) { @@ -220,9 +220,9 @@ func (s *RepoServerControllerSuite) CreateGCPStorageCredentialsSecret(data map[s return testutil.CreateSecret(s.kubeCli, s.repoServerControllerNamespace, "test-repository-server-storage-creds-", corev1.SecretType(secrets.GCPSecretType), data) } -func (s *RepoServerControllerSuite) TestRepositoryServerImmutability(c *C) { +func (s *RepoServerControllerSuite) TestRepositoryServerImmutability(c *check.C) { minorVersion, err := strconv.Atoi(s.k8sServerVersion.Minor) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) if s.k8sServerVersion.Major == "1" && minorVersion < 25 { c.Skip("skipping the test since CRD validation rules feature is enabled only after k8s version 1.25") @@ -235,7 +235,7 @@ func (s *RepoServerControllerSuite) TestRepositoryServerImmutability(c *C) { // Create a repository server CR repoServerCRCreated, err := s.crCli.RepositoryServers(s.repoServerControllerNamespace).Create(ctx, &repoServerCR, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Update the repository server CR's Immutable field. patch := []patchStringValue{{ @@ -245,118 +245,118 @@ func (s *RepoServerControllerSuite) TestRepositoryServerImmutability(c *C) { }} patchBytes, _ := json.Marshal(patch) _, err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Patch(ctx, repoServerCRCreated.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{}) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) // Check Error Message expectedErrorMessage := fmt.Sprintf("RepositoryServer.cr.kanister.io \"%s\" is invalid: spec.repository.rootPath: Invalid value: \"string\": Value is immutable", repoServerCRCreated.GetName()) - c.Assert(err.Error(), Equals, expectedErrorMessage) + c.Assert(err.Error(), check.Equals, expectedErrorMessage) // Delete the repository server CR. 
err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Delete(ctx, repoServerCRCreated.Name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } // TestRepositoryServerStatusIsServerReady creates a CR with correct configurations and // tests that the CR gets into created/ready state -func (s *RepoServerControllerSuite) TestRepositoryServerStatusIsServerReady(c *C) { +func (s *RepoServerControllerSuite) TestRepositoryServerStatusIsServerReady(c *check.C) { ctx := context.Background() repoServerCR := testutil.GetTestKopiaRepositoryServerCR(s.repoServerControllerNamespace) setRepositoryServerSecretsInCR(&s.repoServerSecrets, &repoServerCR) repoServerCRCreated, err := s.crCli.RepositoryServers(s.repoServerControllerNamespace).Create(ctx, &repoServerCR, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForRepoServerInfoUpdateInCR(repoServerCRCreated.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Get repository server CR with the updated server information repoServerCRCreated, err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Get(ctx, repoServerCRCreated.Name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitForPodReady(ctx, s.kubeCli, s.repoServerControllerNamespace, repoServerCRCreated.Status.ServerInfo.PodName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = testutil.CreateTestKopiaRepository(ctx, s.kubeCli, repoServerCRCreated, testutil.GetDefaultS3CompliantStorageLocation()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = s.waitOnRepositoryServerState(c, repoServerCRCreated.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Delete(context.Background(), repoServerCRCreated.Name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } // TestRepositoryServerCRStateWithoutSecrets checks if server creation is failed // when no storage secrets are set -func (s *RepoServerControllerSuite) TestRepositoryServerCRStateWithoutSecrets(c *C) { +func (s *RepoServerControllerSuite) TestRepositoryServerCRStateWithoutSecrets(c *check.C) { repoServerCR := testutil.GetTestKopiaRepositoryServerCR(s.repoServerControllerNamespace) ctx := context.Background() repoServerCRCreated, err := s.crCli.RepositoryServers(s.repoServerControllerNamespace).Create(ctx, &repoServerCR, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) state, err := s.waitOnRepositoryServerState(c, repoServerCRCreated.Name) - c.Assert(err, NotNil) - c.Assert(state, Equals, crv1alpha1.Failed) + c.Assert(err, check.NotNil) + c.Assert(state, check.Equals, crv1alpha1.Failed) err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Delete(context.Background(), repoServerCRCreated.Name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } // TestCreationOfOwnedResources checks if pod and service for repository server // is created successfully -func (s *RepoServerControllerSuite) TestCreationOfOwnedResources(c *C) { +func (s *RepoServerControllerSuite) TestCreationOfOwnedResources(c *check.C) { ctx := context.Background() repoServerCR := testutil.GetTestKopiaRepositoryServerCR(s.repoServerControllerNamespace) setRepositoryServerSecretsInCR(&s.repoServerSecrets, &repoServerCR) repoServerCRCreated, err := s.crCli.RepositoryServers(s.repoServerControllerNamespace).Create(ctx, &repoServerCR, metav1.CreateOptions{}) - c.Assert(err, IsNil) + 
c.Assert(err, check.IsNil) err = s.waitForRepoServerInfoUpdateInCR(repoServerCRCreated.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Get repository server CR with the updated server information repoServerCRCreated, err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Get(ctx, repoServerCRCreated.Name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pod, err := s.kubeCli.CoreV1().Pods(s.repoServerControllerNamespace).Get(ctx, repoServerCRCreated.Status.ServerInfo.PodName, metav1.GetOptions{}) - c.Assert(err, IsNil) - c.Assert(len(pod.OwnerReferences), Equals, 1) - c.Assert(pod.OwnerReferences[0].UID, Equals, repoServerCRCreated.UID) + c.Assert(err, check.IsNil) + c.Assert(len(pod.OwnerReferences), check.Equals, 1) + c.Assert(pod.OwnerReferences[0].UID, check.Equals, repoServerCRCreated.UID) service, err := s.kubeCli.CoreV1().Services(s.repoServerControllerNamespace).Get(ctx, repoServerCRCreated.Status.ServerInfo.ServiceName, metav1.GetOptions{}) - c.Assert(err, IsNil) - c.Assert(len(service.OwnerReferences), Equals, 1) - c.Assert(service.OwnerReferences[0].UID, Equals, repoServerCRCreated.UID) + c.Assert(err, check.IsNil) + c.Assert(len(service.OwnerReferences), check.Equals, 1) + c.Assert(service.OwnerReferences[0].UID, check.Equals, repoServerCRCreated.UID) err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Delete(context.Background(), repoServerCRCreated.Name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *RepoServerControllerSuite) TestInvalidRepositoryPassword(c *C) { +func (s *RepoServerControllerSuite) TestInvalidRepositoryPassword(c *check.C) { ctx := context.Background() repoServerCR := testutil.GetTestKopiaRepositoryServerCR(s.repoServerControllerNamespace) setRepositoryServerSecretsInCR(&s.repoServerSecrets, &repoServerCR) InvalidRepositoryPassword, err := s.CreateRepositoryPasswordSecret(testutil.GetRepoPasswordSecretData("invalidPassword")) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) repoServerCR.Spec.Repository.PasswordSecretRef.Name = InvalidRepositoryPassword.Name repoServerCR.Spec.Repository.PasswordSecretRef.Namespace = InvalidRepositoryPassword.Namespace repoServerCRCreated, err := s.crCli.RepositoryServers(s.repoServerControllerNamespace).Create(ctx, &repoServerCR, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) state, err := s.waitOnRepositoryServerState(c, repoServerCRCreated.Name) - c.Assert(err, NotNil) - c.Assert(state, Equals, crv1alpha1.Failed) + c.Assert(err, check.NotNil) + c.Assert(state, check.Equals, crv1alpha1.Failed) err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Delete(context.Background(), repoServerCRCreated.Name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *RepoServerControllerSuite) TestInvalidStorageLocation(c *C) { +func (s *RepoServerControllerSuite) TestInvalidStorageLocation(c *check.C) { ctx := context.Background() repoServerCR := testutil.GetTestKopiaRepositoryServerCR(s.repoServerControllerNamespace) setRepositoryServerSecretsInCR(&s.repoServerSecrets, &repoServerCR) @@ -365,23 +365,23 @@ func (s *RepoServerControllerSuite) TestInvalidStorageLocation(c *C) { storageLocationData[repositoryserver.BucketKey] = []byte("invalidbucket") InvalidStorageLocationSecret, err := s.CreateStorageLocationSecret(storageLocationData) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) repoServerCR.Spec.Storage.SecretRef.Name = InvalidStorageLocationSecret.Name 
repoServerCR.Spec.Storage.SecretRef.Namespace = InvalidStorageLocationSecret.Namespace repoServerCRCreated, err := s.crCli.RepositoryServers(s.repoServerControllerNamespace).Create(ctx, &repoServerCR, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) state, err := s.waitOnRepositoryServerState(c, repoServerCRCreated.Name) - c.Assert(err, NotNil) - c.Assert(state, Equals, crv1alpha1.Failed) + c.Assert(err, check.NotNil) + c.Assert(state, check.Equals, crv1alpha1.Failed) err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Delete(context.Background(), repoServerCRCreated.Name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *RepoServerControllerSuite) TestInvalidStorageLocationCredentials(c *C) { +func (s *RepoServerControllerSuite) TestInvalidStorageLocationCredentials(c *check.C) { ctx := context.Background() repoServerCR := testutil.GetTestKopiaRepositoryServerCR(s.repoServerControllerNamespace) setRepositoryServerSecretsInCR(&s.repoServerSecrets, &repoServerCR) @@ -390,23 +390,23 @@ func (s *RepoServerControllerSuite) TestInvalidStorageLocationCredentials(c *C) storageLocationCredsData[secrets.AWSAccessKeyID] = []byte("testaccesskey") InvalidStorageLocationCrdesSecret, err := s.CreateAWSStorageCredentialsSecret(storageLocationCredsData) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) repoServerCR.Spec.Storage.CredentialSecretRef.Name = InvalidStorageLocationCrdesSecret.Name repoServerCR.Spec.Storage.CredentialSecretRef.Namespace = InvalidStorageLocationCrdesSecret.Namespace repoServerCRCreated, err := s.crCli.RepositoryServers(s.repoServerControllerNamespace).Create(ctx, &repoServerCR, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) state, err := s.waitOnRepositoryServerState(c, repoServerCRCreated.Name) - c.Assert(err, NotNil) - c.Assert(state, Equals, crv1alpha1.Failed) + c.Assert(err, check.NotNil) + c.Assert(state, check.Equals, crv1alpha1.Failed) err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Delete(context.Background(), repoServerCRCreated.Name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *RepoServerControllerSuite) TestFilestoreLocationVolumeMountOnRepoServerPod(c *C) { +func (s *RepoServerControllerSuite) TestFilestoreLocationVolumeMountOnRepoServerPod(c *check.C) { var err error ctx := context.Background() repoServerCR := testutil.GetTestKopiaRepositoryServerCR(s.repoServerControllerNamespace) @@ -425,27 +425,27 @@ func (s *RepoServerControllerSuite) TestFilestoreLocationVolumeMountOnRepoServer }, } pvc, err = s.kubeCli.CoreV1().PersistentVolumeClaims(s.repoServerControllerNamespace).Create(ctx, pvc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) storageSecret, err := s.CreateStorageLocationSecret(testutil.GetFileStoreLocationSecretData(pvc.Name)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) repoServerCR.Spec.Storage.SecretRef.Name = storageSecret.Name repoServerCRCreated, err := s.crCli.RepositoryServers(s.repoServerControllerNamespace).Create(ctx, &repoServerCR, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForRepoServerInfoUpdateInCR(repoServerCRCreated.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Get repository server CR with the updated server information repoServerCRCreated, err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Get(ctx, repoServerCRCreated.Name, metav1.GetOptions{}) - c.Assert(err, 
IsNil) + c.Assert(err, check.IsNil) pod, err := s.kubeCli.CoreV1().Pods(s.repoServerControllerNamespace).Get(ctx, repoServerCRCreated.Status.ServerInfo.PodName, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Assert(len(pod.Spec.Volumes), Equals, 3) + c.Assert(len(pod.Spec.Volumes), check.Equals, 3) var volumeattached bool for _, vol := range pod.Spec.Volumes { @@ -453,10 +453,10 @@ func (s *RepoServerControllerSuite) TestFilestoreLocationVolumeMountOnRepoServer volumeattached = true } } - c.Assert(volumeattached, Equals, true) + c.Assert(volumeattached, check.Equals, true) err = s.crCli.RepositoryServers(s.repoServerControllerNamespace).Delete(context.Background(), repoServerCRCreated.Name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } func (s *RepoServerControllerSuite) waitForRepoServerInfoUpdateInCR(repoServerName string) error { @@ -486,7 +486,7 @@ func (s *RepoServerControllerSuite) waitForRepoServerInfoUpdateInCR(repoServerNa return err } -func (s *RepoServerControllerSuite) waitOnRepositoryServerState(c *C, reposerverName string) (crv1alpha1.RepositoryServerProgress, error) { +func (s *RepoServerControllerSuite) waitOnRepositoryServerState(c *check.C, reposerverName string) (crv1alpha1.RepositoryServerProgress, error) { ctxTimeout := 10 * time.Minute ctx, cancel := context.WithTimeout(context.Background(), ctxTimeout) defer cancel() @@ -556,12 +556,12 @@ func getTestKanisterToolsPod(podName string) (pod *corev1.Pod) { } } -func (s *RepoServerControllerSuite) TearDownSuite(c *C) { +func (s *RepoServerControllerSuite) TearDownSuite(c *check.C) { err := os.Unsetenv(consts.KanisterToolsImageEnvName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) if s.repoServerControllerNamespace != "" { err := s.kubeCli.CoreV1().Namespaces().Delete(context.TODO(), s.repoServerControllerNamespace, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } if s.cancel != nil { s.cancel() diff --git a/pkg/controllers/repositoryserver/secrets_manager_test.go b/pkg/controllers/repositoryserver/secrets_manager_test.go index 420754e1f5..18b562012d 100644 --- a/pkg/controllers/repositoryserver/secrets_manager_test.go +++ b/pkg/controllers/repositoryserver/secrets_manager_test.go @@ -17,13 +17,13 @@ package repositoryserver import ( "context" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/kanisterio/kanister/pkg/testutil" ) -func (s *RepoServerControllerSuite) TestFetchSecretsForRepositoryServer(c *C) { +func (s *RepoServerControllerSuite) TestFetchSecretsForRepositoryServer(c *check.C) { // Test getSecretsFromCR is successful repositoryServer := testutil.GetTestKopiaRepositoryServerCR(s.repoServerControllerNamespace) setRepositoryServerSecretsInCR(&s.repoServerSecrets, &repositoryServer) @@ -36,19 +36,19 @@ func (s *RepoServerControllerSuite) TestFetchSecretsForRepositoryServer(c *C) { } err := repoServerHandler.getSecretsFromCR(context.Background()) - c.Assert(err, IsNil) - c.Assert(repoServerHandler.RepositoryServerSecrets, NotNil) - c.Assert(repoServerHandler.RepositoryServerSecrets.repositoryPassword, DeepEquals, s.repoServerSecrets.repositoryPassword) - c.Assert(repoServerHandler.RepositoryServerSecrets.storage, DeepEquals, s.repoServerSecrets.storage) - c.Assert(repoServerHandler.RepositoryServerSecrets.storageCredentials, DeepEquals, s.repoServerSecrets.storageCredentials) - c.Assert(repoServerHandler.RepositoryServerSecrets.serverAdmin, DeepEquals, s.repoServerSecrets.serverAdmin) - c.Assert(repoServerHandler.RepositoryServerSecrets.serverTLS, DeepEquals, s.repoServerSecrets.serverTLS) - c.Assert(repoServerHandler.RepositoryServerSecrets.serverUserAccess, DeepEquals, s.repoServerSecrets.serverUserAccess) + c.Assert(err, check.IsNil) + c.Assert(repoServerHandler.RepositoryServerSecrets, check.NotNil) + c.Assert(repoServerHandler.RepositoryServerSecrets.repositoryPassword, check.DeepEquals, s.repoServerSecrets.repositoryPassword) + c.Assert(repoServerHandler.RepositoryServerSecrets.storage, check.DeepEquals, s.repoServerSecrets.storage) + c.Assert(repoServerHandler.RepositoryServerSecrets.storageCredentials, check.DeepEquals, s.repoServerSecrets.storageCredentials) + c.Assert(repoServerHandler.RepositoryServerSecrets.serverAdmin, check.DeepEquals, s.repoServerSecrets.serverAdmin) + c.Assert(repoServerHandler.RepositoryServerSecrets.serverTLS, check.DeepEquals, s.repoServerSecrets.serverTLS) + c.Assert(repoServerHandler.RepositoryServerSecrets.serverUserAccess, check.DeepEquals, s.repoServerSecrets.serverUserAccess) // Test getSecretsFromCR is unsuccessful when one of the secrets does not exist in the namespace repositoryServer.Spec.Storage.SecretRef.Name = "SecretDoesNotExist" repoServerHandler.RepositoryServerSecrets = repositoryServerSecrets{} err = repoServerHandler.getSecretsFromCR(context.Background()) - c.Assert(err, NotNil) - c.Assert(repoServerHandler.RepositoryServerSecrets, Equals, repositoryServerSecrets{}) + c.Assert(err, check.NotNil) + c.Assert(repoServerHandler.RepositoryServerSecrets, check.Equals, repositoryServerSecrets{}) } diff --git a/pkg/datamover/datamover_test.go b/pkg/datamover/datamover_test.go index f3a7f9f4f1..db8e359faa 100644 --- a/pkg/datamover/datamover_test.go +++ b/pkg/datamover/datamover_test.go @@ -17,10 +17,10 @@ package datamover import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. 
 func Test(t *testing.T) {
-	TestingT(t)
+	check.TestingT(t)
 }
diff --git a/pkg/datamover/profile.go b/pkg/datamover/profile.go
index c3182208ad..33f4916ea5 100644
--- a/pkg/datamover/profile.go
+++ b/pkg/datamover/profile.go
@@ -41,7 +41,7 @@ func (p *profile) Pull(ctx context.Context, sourcePath, destinationPath string)
 		if err != nil {
 			return err
 		}
-		if err := p.connectToKopiaRepositoryServer(ctx); err != nil {
+		if err := p.connectToKopiaRepositoryServer(ctx, repository.ReadOnlyAccess); err != nil {
 			return err
 		}
 		return kopiaLocationPull(ctx, kopiaSnap.ID, destinationPath, sourcePath, p.profile.Credential.KopiaServerSecret.Password)
@@ -55,7 +55,7 @@ func (p *profile) Pull(ctx context.Context, sourcePath, destinationPath string)
 
 func (p *profile) Push(ctx context.Context, sourcePath, destinationPath string) error {
 	if p.profile.Location.Type == crv1alpha1.LocationTypeKopia {
-		if err := p.connectToKopiaRepositoryServer(ctx); err != nil {
+		if err := p.connectToKopiaRepositoryServer(ctx, repository.WriteAccess); err != nil {
 			return err
 		}
 		_, err := kopiaLocationPush(ctx, destinationPath, p.outputName, sourcePath, p.profile.Credential.KopiaServerSecret.Password)
@@ -74,7 +74,7 @@ func (p *profile) Delete(ctx context.Context, destinationPath string) error {
 		if err != nil {
 			return err
 		}
-		if err := p.connectToKopiaRepositoryServer(ctx); err != nil {
+		if err := p.connectToKopiaRepositoryServer(ctx, repository.WriteAccess); err != nil {
 			return err
 		}
 		return kopiaLocationDelete(ctx, kopiaSnap.ID, destinationPath, p.profile.Credential.KopiaServerSecret.Password)
@@ -82,7 +82,7 @@ func (p *profile) Delete(ctx context.Context, destinationPath string) error {
 	return locationDelete(ctx, p.profile, destinationPath)
 }
 
-func (p *profile) connectToKopiaRepositoryServer(ctx context.Context) error {
+func (p *profile) connectToKopiaRepositoryServer(ctx context.Context, accessMode repository.AccessMode) error {
 	contentCacheSize := kopia.GetDataStoreGeneralContentCacheSize(p.profile.Credential.KopiaServerSecret.ConnectOptions)
 	metadataCacheSize := kopia.GetDataStoreGeneralMetadataCacheSize(p.profile.Credential.KopiaServerSecret.ConnectOptions)
 	return repository.ConnectToAPIServer(
@@ -94,6 +94,7 @@ func (p *profile) connectToKopiaRepositoryServer(ctx context.Context) error {
 		p.profile.Credential.KopiaServerSecret.Username,
 		contentCacheSize,
 		metadataCacheSize,
+		accessMode,
 	)
 }
 
diff --git a/pkg/datamover/profile_test.go b/pkg/datamover/profile_test.go
index 1f8af86363..d7f0dcb168 100644
--- a/pkg/datamover/profile_test.go
+++ b/pkg/datamover/profile_test.go
@@ -19,7 +19,7 @@ import (
 	"context"
 	"path/filepath"
 
-	. 
"gopkg.in/check.v1" + "gopkg.in/check.v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" "github.com/kanisterio/kanister/pkg/objectstore" @@ -31,11 +31,11 @@ type ProfileSuite struct { location *crv1alpha1.Location } -var _ = Suite(&ProfileSuite{}) +var _ = check.Suite(&ProfileSuite{}) const testContent = "test-content" -func (ps *ProfileSuite) SetUpSuite(c *C) { +func (ps *ProfileSuite) SetUpSuite(c *check.C) { // Set Context as Background ps.ctx = context.Background() @@ -46,35 +46,35 @@ func (ps *ProfileSuite) SetUpSuite(c *C) { } } -func (ps *ProfileSuite) TestLocationOperationsForProfileDataMover(c *C) { +func (ps *ProfileSuite) TestLocationOperationsForProfileDataMover(c *check.C) { p := testutil.ObjectStoreProfileOrSkip(c, objectstore.ProviderTypeS3, *ps.location) dir := c.MkDir() path := filepath.Join(dir, "test-object1.txt") source := bytes.NewBufferString(testContent) err := locationPush(ps.ctx, p, path, source) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) target := bytes.NewBuffer(nil) err = locationPull(ps.ctx, p, path, target) - c.Assert(err, IsNil) - c.Assert(target.String(), Equals, testContent) + c.Assert(err, check.IsNil) + c.Assert(target.String(), check.Equals, testContent) // test deleting single artifact err = locationDelete(ps.ctx, p, path) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // test deleting dir with multiple artifacts source = bytes.NewBufferString(testContent) err = locationPush(ps.ctx, p, path, source) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) path = filepath.Join(dir, "test-object2.txt") source = bytes.NewBufferString(testContent) err = locationPush(ps.ctx, p, path, source) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = locationDelete(ps.ctx, p, dir) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } diff --git a/pkg/datamover/repository_server.go b/pkg/datamover/repository_server.go index f83e422c1f..d3e4f1695d 100644 --- a/pkg/datamover/repository_server.go +++ b/pkg/datamover/repository_server.go @@ -40,7 +40,7 @@ func (rs *repositoryServer) Pull(ctx context.Context, sourcePath, destinationPat if err != nil { return err } - password, err := rs.connectToKopiaRepositoryServer(ctx) + password, err := rs.connectToKopiaRepositoryServer(ctx, repository.ReadOnlyAccess) if err != nil { return err } @@ -48,7 +48,7 @@ func (rs *repositoryServer) Pull(ctx context.Context, sourcePath, destinationPat } func (rs *repositoryServer) Push(ctx context.Context, sourcePath, destinationPath string) error { - password, err := rs.connectToKopiaRepositoryServer(ctx) + password, err := rs.connectToKopiaRepositoryServer(ctx, repository.WriteAccess) if err != nil { return err } @@ -61,14 +61,14 @@ func (rs *repositoryServer) Delete(ctx context.Context, destinationPath string) if err != nil { return err } - password, err := rs.connectToKopiaRepositoryServer(ctx) + password, err := rs.connectToKopiaRepositoryServer(ctx, repository.WriteAccess) if err != nil { return err } return kopiaLocationDelete(ctx, kopiaSnap.ID, destinationPath, password) } -func (rs *repositoryServer) connectToKopiaRepositoryServer(ctx context.Context) (string, error) { +func (rs *repositoryServer) connectToKopiaRepositoryServer(ctx context.Context, accessMode repository.AccessMode) (string, error) { hostname, userPassphrase, err := rs.hostnameAndUserPassphrase() if err != nil { return "", errors.Wrap(err, "Error Retrieving Hostname and User Passphrase from Repository Server") @@ -83,6 +83,7 @@ func (rs *repositoryServer) 
connectToKopiaRepositoryServer(ctx context.Context) rs.repositoryServer.Username, rs.repositoryServer.ContentCacheMB, rs.repositoryServer.MetadataCacheMB, + accessMode, ) } diff --git a/pkg/datamover/repository_server_test.go b/pkg/datamover/repository_server_test.go index d2ea2f9d0f..cc21af0a84 100644 --- a/pkg/datamover/repository_server_test.go +++ b/pkg/datamover/repository_server_test.go @@ -23,7 +23,7 @@ import ( "strconv" "strings" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/apimachinery/pkg/util/rand" kopiacmd "github.com/kanisterio/kanister/pkg/kopia/command" @@ -53,9 +53,9 @@ const ( TestRepositoryEncryptionKey = "TEST_REPOSITORY_ENCRYPTION_KEY" ) -var _ = Suite(&RepositoryServerSuite{}) +var _ = check.Suite(&RepositoryServerSuite{}) -func (rss *RepositoryServerSuite) SetUpSuite(c *C) { +func (rss *RepositoryServerSuite) SetUpSuite(c *check.C) { // Check if kopia binary exists in PATH if !CommandExists("kopia") { c.Skip("Skipping repository server datamover unit test. Couldn't find kopia binary in the path.") @@ -88,7 +88,7 @@ func (rss *RepositoryServerSuite) SetUpSuite(c *C) { rss.tlsDir = filepath.Join(temp, "tls-"+rand.String(5)) } -func (rss *RepositoryServerSuite) setupKopiaRepositoryServer(c *C) { +func (rss *RepositoryServerSuite) setupKopiaRepositoryServer(c *check.C) { // Setting Up Kopia Repository contentCacheMB, metadataCacheMB := kopiacmd.GetGeneralCacheSizeSettings() repoCommandArgs := kopiacmd.RepositoryCommandArgs{ @@ -110,15 +110,15 @@ func (rss *RepositoryServerSuite) setupKopiaRepositoryServer(c *C) { // First try to connect with Kopia Repository c.Log("Connecting with Kopia Repository...") repoConnectCmd, err := kopiacmd.RepositoryConnectCommand(repoCommandArgs) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = ExecCommand(c, repoConnectCmd...) if err != nil && strings.Contains(err.Error(), "error connecting to repository") { // If connection fails, create Kopia Repository c.Log("Creating Kopia Repository...") repoCreateCmd, err := kopiacmd.RepositoryCreateCommand(repoCommandArgs) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = ExecCommand(c, repoCreateCmd...) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } // Setting Up Kopia Repository Server @@ -140,7 +140,7 @@ func (rss *RepositoryServerSuite) setupKopiaRepositoryServer(c *C) { } serverStartCmd := kopiacmd.ServerStart(serverStartCommandArgs) _, err = ExecCommand(c, serverStartCmd...) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Adding Users to Kopia Repository Server serverAddUserCommandArgs := kopiacmd.ServerAddUserCommandArgs{ @@ -154,11 +154,11 @@ func (rss *RepositoryServerSuite) setupKopiaRepositoryServer(c *C) { } serverAddUserCmd := kopiacmd.ServerAddUser(serverAddUserCommandArgs) _, err = ExecCommand(c, serverAddUserCmd...) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Getting Fingerprint of Kopia Repository Server rss.fingerprint = fingerprintFromTLSCert(c, tlsCertFile) - c.Assert(rss.fingerprint, Not(Equals), "") + c.Assert(rss.fingerprint, check.Not(check.Equals), "") // Refreshing Kopia Repository Server serverRefreshCommandArgs := kopiacmd.ServerRefreshCommandArgs{ @@ -174,7 +174,7 @@ func (rss *RepositoryServerSuite) setupKopiaRepositoryServer(c *C) { } serverRefreshCmd := kopiacmd.ServerRefresh(serverRefreshCommandArgs) _, err = ExecCommand(c, serverRefreshCmd...) 
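// A minimal sketch (not part of the patch) of the access-mode plumbing that the
// pkg/datamover hunks above introduce: Pull connects to the Kopia repository
// server read-only, while Push and Delete request write access before calling
// repository.ConnectToAPIServer. Only AccessMode, ReadOnlyAccess, and
// WriteAccess come from the hunks themselves; the helper and the import path
// below are assumptions added for illustration.
package datamover

import "github.com/kanisterio/kanister/pkg/kopia/repository" // import path assumed

// accessModeForOperation mirrors how the Pull/Push/Delete call sites pick a mode.
func accessModeForOperation(op string) repository.AccessMode {
	switch op {
	case "pull":
		// Restores only read from the repository, so a read-only session suffices.
		return repository.ReadOnlyAccess
	default: // "push", "delete"
		// Pushing snapshots and deleting them both mutate the repository.
		return repository.WriteAccess
	}
}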
- c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check Server Status serverStatusCommandArgs := kopiacmd.ServerStatusCommandArgs{ @@ -193,14 +193,14 @@ func (rss *RepositoryServerSuite) setupKopiaRepositoryServer(c *C) { if !strings.Contains(out, "IDLE") && out != "" { c.Fail() } - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (rss *RepositoryServerSuite) connectWithTestKopiaRepositoryServer(c *C) error { +func (rss *RepositoryServerSuite) connectWithTestKopiaRepositoryServer(c *check.C) error { // Connect With Kopia Repository Server tlsCertFile := rss.tlsDir + ".cert" tlsCertStr := readTLSCert(c, tlsCertFile) - c.Assert(tlsCertStr, Not(Equals), "") + c.Assert(tlsCertStr, check.Not(check.Equals), "") contentCacheMB, metadataCacheMB := kopiacmd.GetGeneralCacheSizeSettings() return repository.ConnectToAPIServer( rss.ctx, @@ -211,10 +211,11 @@ func (rss *RepositoryServerSuite) connectWithTestKopiaRepositoryServer(c *C) err rss.testUsername, contentCacheMB, metadataCacheMB, + repository.WriteAccess, ) } -func (rss *RepositoryServerSuite) TestLocationOperationsForRepositoryServerDataMover(c *C) { +func (rss *RepositoryServerSuite) TestLocationOperationsForRepositoryServerDataMover(c *check.C) { // Setup Kopia Repository Server rss.setupKopiaRepositoryServer(c) @@ -224,30 +225,30 @@ func (rss *RepositoryServerSuite) TestLocationOperationsForRepositoryServerDataM cmd := exec.Command("touch", filePath) _, err := cmd.Output() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) targetDir := c.MkDir() // Connect with Kopia Repository Server err = rss.connectWithTestKopiaRepositoryServer(c) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Test Kopia Repository Server Location Push snapInfo, err := kopiaLocationPush(rss.ctx, rss.repoPathPrefix, "kandoOutput", sourceDir, rss.testUserPassword) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Test Kopia Repository Server Location Pull err = kopiaLocationPull(rss.ctx, snapInfo.ID, rss.repoPathPrefix, targetDir, rss.testUserPassword) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // TODO : Verify Data is Pulled from the Location (Issue #2230) // Test Kopia Repository Location Delete err = kopiaLocationDelete(rss.ctx, snapInfo.ID, rss.repoPathPrefix, rss.testUserPassword) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Verify Data is Deleted from the Location // Expect an Error while Pulling Data err = kopiaLocationPull(rss.ctx, snapInfo.ID, rss.repoPathPrefix, targetDir, rss.testUserPassword) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } diff --git a/pkg/discovery/crd_test.go b/pkg/discovery/crd_test.go index 9f56c22656..2a28738309 100644 --- a/pkg/discovery/crd_test.go +++ b/pkg/discovery/crd_test.go @@ -17,7 +17,7 @@ package discovery import ( "context" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" crdclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "github.com/kanisterio/kanister/pkg/filter" @@ -26,26 +26,26 @@ import ( type CRDSuite struct{} -var _ = Suite(&CRDSuite{}) +var _ = check.Suite(&CRDSuite{}) -func (s *CRDSuite) TestCRDMatcher(c *C) { +func (s *CRDSuite) TestCRDMatcher(c *check.C) { ctx := context.Background() cfg, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := crdclient.NewForConfig(cfg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) g, err := CRDMatcher(ctx, cli) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) gvrs, err := NamespacedGVRs(ctx, cli.Discovery()) - c.Assert(err, IsNil) - c.Assert(gvrs, Not(HasLen), 0) + c.Assert(err, check.IsNil) + c.Assert(gvrs, check.Not(check.HasLen), 0) // We assume there's at least one CRD in the cluster. igvrs := filter.GroupVersionResourceList(gvrs).Include(g) egvrs := filter.GroupVersionResourceList(gvrs).Exclude(g) - c.Assert(igvrs, Not(HasLen), 0) - c.Assert(egvrs, Not(HasLen), 0) - c.Assert(len(igvrs)+len(egvrs), Equals, len(gvrs)) + c.Assert(igvrs, check.Not(check.HasLen), 0) + c.Assert(egvrs, check.Not(check.HasLen), 0) + c.Assert(len(igvrs)+len(egvrs), check.Equals, len(gvrs)) } diff --git a/pkg/discovery/discovery_test.go b/pkg/discovery/discovery_test.go index 3faddd4839..739670cc70 100644 --- a/pkg/discovery/discovery_test.go +++ b/pkg/discovery/discovery_test.go @@ -18,37 +18,37 @@ import ( "context" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/kube" ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type DiscoverSuite struct{} -var _ = Suite(&DiscoverSuite{}) +var _ = check.Suite(&DiscoverSuite{}) -func (s *DiscoverSuite) TestDiscover(c *C) { +func (s *DiscoverSuite) TestDiscover(c *check.C) { ctx := context.Background() cli, err := kube.NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) gvrs, err := AllGVRs(ctx, cli.Discovery()) - c.Assert(err, IsNil) - c.Assert(gvrs, Not(HasLen), 0) + c.Assert(err, check.IsNil) + c.Assert(gvrs, check.Not(check.HasLen), 0) for _, gvr := range gvrs { - c.Assert(gvr.Empty(), Equals, false) - c.Assert(gvr.Version, Not(Equals), "") - c.Assert(gvr.Resource, Not(Equals), "") + c.Assert(gvr.Empty(), check.Equals, false) + c.Assert(gvr.Version, check.Not(check.Equals), "") + c.Assert(gvr.Resource, check.Not(check.Equals), "") } gvrs, err = NamespacedGVRs(ctx, cli.Discovery()) - c.Assert(err, IsNil) - c.Assert(gvrs, Not(HasLen), 0) + c.Assert(err, check.IsNil) + c.Assert(gvrs, check.Not(check.HasLen), 0) for _, gvr := range gvrs { - c.Assert(gvr.Empty(), Equals, false) - c.Assert(gvr.Version, Not(Equals), "") - c.Assert(gvr.Resource, Not(Equals), "") + c.Assert(gvr.Empty(), check.Equals, false) + c.Assert(gvr.Version, check.Not(check.Equals), "") + c.Assert(gvr.Resource, check.Not(check.Equals), "") } } diff --git a/pkg/envdir/envdir_test.go b/pkg/envdir/envdir_test.go index 40128f0d8c..b18b8d1128 100644 --- a/pkg/envdir/envdir_test.go +++ b/pkg/envdir/envdir_test.go @@ -19,22 +19,22 @@ import ( "path/filepath" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) }
+func Test(t *testing.T) { check.TestingT(t) }
 type EnvDirSuite struct{}
-var _ = Suite(&EnvDirSuite{})
+var _ = check.Suite(&EnvDirSuite{})
-func (s *EnvDirSuite) TestEnvDir(c *C) {
+func (s *EnvDirSuite) TestEnvDir(c *check.C) {
 	d := c.MkDir()
 	p := filepath.Join(d, "FOO")
 	err := os.WriteFile(p, []byte("BAR"), os.ModePerm)
-	c.Assert(err, IsNil)
+	c.Assert(err, check.IsNil)
 	e, err := EnvDir(d)
-	c.Assert(err, IsNil)
-	c.Assert(e, DeepEquals, []string{"FOO=BAR"})
+	c.Assert(err, check.IsNil)
+	c.Assert(e, check.DeepEquals, []string{"FOO=BAR"})
 }
diff --git a/pkg/ephemeral/envvar_test.go b/pkg/ephemeral/envvar_test.go
index d3fa275ff2..e1c484c6df 100644
--- a/pkg/ephemeral/envvar_test.go
+++ b/pkg/ephemeral/envvar_test.go
@@ -17,7 +17,7 @@ package ephemeral_test
 import (
 	"os"
-	. "gopkg.in/check.v1"
+	"gopkg.in/check.v1"
 	corev1 "k8s.io/api/core/v1"
 	"github.com/kanisterio/kanister/pkg/ephemeral"
@@ -26,9 +26,9 @@ import (
 type EnvVarSuite struct{}
-var _ = Suite(&EnvVarSuite{})
+var _ = check.Suite(&EnvVarSuite{})
-func (s *EnvVarSuite) TestOSEnvVarKubePodOptions(c *C) {
+func (s *EnvVarSuite) TestOSEnvVarKubePodOptions(c *check.C) {
 	expected := []corev1.EnvVar{
 		{
 			Name: "KANISTER_REGISTERED_OS_ENVVAR",
@@ -43,20 +43,20 @@ func (s *EnvVarSuite) TestOSEnvVarKubePodOptions(c *C) {
 	var options kube.PodOptions
 	registeredAppliers.Apply(&options)
-	c.Assert(options.EnvironmentVariables, DeepEquals, []corev1.EnvVar(nil))
+	c.Assert(options.EnvironmentVariables, check.DeepEquals, []corev1.EnvVar(nil))
 	// OS environment variable set
 	err := os.Setenv(expected[0].Name, expected[0].Value)
-	c.Assert(err, IsNil)
+	c.Assert(err, check.IsNil)
 	defer func() {
 		err := os.Unsetenv(expected[0].Name)
-		c.Assert(err, IsNil)
+		c.Assert(err, check.IsNil)
 	}()
 	registeredAppliers.Apply(&options)
-	c.Assert(options.EnvironmentVariables, DeepEquals, expected)
+	c.Assert(options.EnvironmentVariables, check.DeepEquals, expected)
 }
-func (s *EnvVarSuite) TestOSEnvVarCoreV1Container(c *C) {
+func (s *EnvVarSuite) TestOSEnvVarCoreV1Container(c *check.C) {
 	expected := []corev1.EnvVar{
 		{
 			Name: "KANISTER_REGISTERED_OS_ENVVAR",
@@ -71,21 +71,21 @@ func (s *EnvVarSuite) TestOSEnvVarCoreV1Container(c *C) {
 	var options corev1.Container
 	registeredAppliers.Apply(&options)
-	c.Assert(options.Env, DeepEquals, []corev1.EnvVar(nil))
+	c.Assert(options.Env, check.DeepEquals, []corev1.EnvVar(nil))
 	// OS environment variable set
 	err := os.Setenv(expected[0].Name, expected[0].Value)
-	c.Assert(err, IsNil)
+	c.Assert(err, check.IsNil)
 	defer func() {
 		err := os.Unsetenv(expected[0].Name)
-		c.Assert(err, IsNil)
+		c.Assert(err, check.IsNil)
 	}()
 	registeredAppliers.Apply(&options)
-	c.Assert(options.Env, DeepEquals, expected)
+	c.Assert(options.Env, check.DeepEquals, expected)
 }
-func (s *EnvVarSuite) TestStaticEnvVarKubePodOptions(c *C) {
+func (s *EnvVarSuite) TestStaticEnvVarKubePodOptions(c *check.C) {
 	expected := []corev1.EnvVar{
 		{
 			Name: "KANISTER_REGISTERED_STATIC_ENVVAR",
@@ -99,10 +99,10 @@ func (s *EnvVarSuite) TestStaticEnvVarKubePodOptions(c *C) {
 	var options kube.PodOptions
 	registeredAppliers.Apply(&options)
-	c.Assert(options.EnvironmentVariables, DeepEquals, expected)
+	c.Assert(options.EnvironmentVariables, check.DeepEquals, expected)
 }
-func (s *EnvVarSuite) TestRegisteringStaticEnvVarCoreV1Container(c *C) {
+func (s *EnvVarSuite) TestRegisteringStaticEnvVarCoreV1Container(c *check.C) {
 	expected := []corev1.EnvVar{
 		{
 			Name: "KANISTER_REGISTERED_STATIC_ENVVAR",
@@ -116,5 +116,5 @@ func (s *EnvVarSuite) TestRegisteringStaticEnvVarCoreV1Container(c *C) {
 	var options corev1.Container
 	registeredAppliers.Apply(&options)
-	c.Assert(options.Env, DeepEquals, expected)
+	c.Assert(options.Env, check.DeepEquals, expected)
 }
diff --git a/pkg/ephemeral/ephemeral_test.go b/pkg/ephemeral/ephemeral_test.go
index dba38a2153..71c26b2b9d 100644
--- a/pkg/ephemeral/ephemeral_test.go
+++ b/pkg/ephemeral/ephemeral_test.go
@@ -17,7 +17,7 @@ package ephemeral_test
 import (
 	"testing"
-	. "gopkg.in/check.v1"
+	"gopkg.in/check.v1"
 	corev1 "k8s.io/api/core/v1"
 	"github.com/kanisterio/kanister/pkg/ephemeral"
@@ -25,16 +25,16 @@ import (
 )
 // Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { TestingT(t) }
+func Test(t *testing.T) { check.TestingT(t) }
 type EphemeralSuite struct {
 	OriginalContainer ephemeral.ApplierList[corev1.Container]
 	OriginalPodOptions ephemeral.ApplierList[kube.PodOptions]
 }
-var _ = Suite(&EphemeralSuite{})
+var _ = check.Suite(&EphemeralSuite{})
-func (s *EphemeralSuite) SetUpTest(c *C) {
+func (s *EphemeralSuite) SetUpTest(c *check.C) {
 	s.OriginalContainer = ephemeral.Container
 	ephemeral.Container = ephemeral.ApplierList[corev1.Container]{}
@@ -42,7 +42,7 @@ func (s *EphemeralSuite) SetUpTest(c *C) {
 	ephemeral.PodOptions = ephemeral.ApplierList[kube.PodOptions]{}
 }
-func (s *EphemeralSuite) TearDownTest(c *C) {
+func (s *EphemeralSuite) TearDownTest(c *check.C) {
 	ephemeral.Container = s.OriginalContainer
 	ephemeral.PodOptions = s.OriginalPodOptions
 }
@@ -51,14 +51,14 @@ type TestContainerApplier struct{}
 func (TestContainerApplier) Apply(*corev1.Container) {}
-func (s *EphemeralSuite) TestRegisterContainerApplier(c *C) {
+func (s *EphemeralSuite) TestRegisterContainerApplier(c *check.C) {
 	var applier TestContainerApplier
-	c.Assert(len(ephemeral.Container), Equals, 0)
+	c.Assert(len(ephemeral.Container), check.Equals, 0)
 	ephemeral.Register(applier)
-	if c.Check(len(ephemeral.Container), Equals, 1) {
-		c.Check(ephemeral.Container[0], Equals, applier)
+	if c.Check(len(ephemeral.Container), check.Equals, 1) {
+		c.Check(ephemeral.Container[0], check.Equals, applier)
 	}
 }
@@ -66,36 +66,36 @@ type TestPodOptionsApplier struct{}
 func (TestPodOptionsApplier) Apply(*kube.PodOptions) {}
-func (s *EphemeralSuite) TestRegisterPodOptionsApplier(c *C) {
+func (s *EphemeralSuite) TestRegisterPodOptionsApplier(c *check.C) {
 	var applier TestPodOptionsApplier
-	c.Assert(len(ephemeral.PodOptions), Equals, 0)
+	c.Assert(len(ephemeral.PodOptions), check.Equals, 0)
 	ephemeral.Register(applier)
-	if c.Check(len(ephemeral.PodOptions), Equals, 1) {
-		c.Check(ephemeral.PodOptions[0], Equals, applier)
+	if c.Check(len(ephemeral.PodOptions), check.Equals, 1) {
+		c.Check(ephemeral.PodOptions[0], check.Equals, applier)
 	}
 }
-func (s *EphemeralSuite) TestRegisterSet(c *C) {
+func (s *EphemeralSuite) TestRegisterSet(c *check.C) {
 	set := ephemeral.ApplierSet{
 		Container: TestContainerApplier{},
 		PodOptions: TestPodOptionsApplier{},
 	}
-	c.Assert(len(ephemeral.Container), Equals, 0)
-	c.Assert(len(ephemeral.PodOptions), Equals, 0)
+	c.Assert(len(ephemeral.Container), check.Equals, 0)
+	c.Assert(len(ephemeral.PodOptions), check.Equals, 0)
 	ephemeral.RegisterSet(set)
-	if c.Check(len(ephemeral.Container), Equals, 1) {
-		c.Check(ephemeral.Container[0], Equals, set.Container)
+	if c.Check(len(ephemeral.Container), check.Equals, 1) {
+		c.Check(ephemeral.Container[0], check.Equals, set.Container)
 	}
-	if c.Check(len(ephemeral.PodOptions), Equals, 1) {
-		c.Check(ephemeral.PodOptions[0], Equals, set.PodOptions)
+	if c.Check(len(ephemeral.PodOptions), check.Equals, 1) {
+		c.Check(ephemeral.PodOptions[0], check.Equals, set.PodOptions)
 	}
 }
-func (s *EphemeralSuite) TestFilter(c *C) {
+func (s *EphemeralSuite) TestFilter(c *check.C) {
 	applier := ephemeral.Filter(
 		ephemeral.PodOptionsNameFilter("matches"),
 		ephemeral.ApplierFunc[kube.PodOptions](func(options *kube.PodOptions) {
@@ -107,23 +107,23 @@ func (s *EphemeralSuite) TestFilter(c *C) {
 	options.Name = "nomatch"
 	applier.Apply(&options)
-	c.Check(options.Image, Equals, "")
+	c.Check(options.Image, check.Equals, "")
 	options.Name = "matches"
 	applier.Apply(&options)
-	c.Check(options.Image, Equals, "applied-image")
+	c.Check(options.Image, check.Equals, "applied-image")
 }
-func (s *EphemeralSuite) TestContainerNameFilter(c *C) {
+func (s *EphemeralSuite) TestContainerNameFilter(c *check.C) {
 	filter := ephemeral.ContainerNameFilter("matches")
-	c.Check(filter.Filter(&corev1.Container{Name: "matches"}), Equals, true)
-	c.Check(filter.Filter(&corev1.Container{Name: "nomatch"}), Equals, false)
+	c.Check(filter.Filter(&corev1.Container{Name: "matches"}), check.Equals, true)
+	c.Check(filter.Filter(&corev1.Container{Name: "nomatch"}), check.Equals, false)
 }
-func (s *EphemeralSuite) TestPodOptionsNameFilter(c *C) {
+func (s *EphemeralSuite) TestPodOptionsNameFilter(c *check.C) {
 	filter := ephemeral.PodOptionsNameFilter("matches")
-	c.Check(filter.Filter(&kube.PodOptions{Name: "matches"}), Equals, true)
-	c.Check(filter.Filter(&kube.PodOptions{Name: "nomatch"}), Equals, false)
+	c.Check(filter.Filter(&kube.PodOptions{Name: "matches"}), check.Equals, true)
+	c.Check(filter.Filter(&kube.PodOptions{Name: "nomatch"}), check.Equals, false)
 }
diff --git a/pkg/errorchecker/errorchecker_test.go b/pkg/errorchecker/errorchecker_test.go
index 7194723fc4..3a12e5a5ef 100644
--- a/pkg/errorchecker/errorchecker_test.go
+++ b/pkg/errorchecker/errorchecker_test.go
@@ -3,29 +3,28 @@ package errorchecker
 import (
 	"testing"
-	. "gopkg.in/check.v1"
-
 	"github.com/kanisterio/errkit"
 	"github.com/pkg/errors"
+	"gopkg.in/check.v1"
 )
 // Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { TestingT(t) }
+func Test(t *testing.T) { check.TestingT(t) }
 type ErrorsTestSuite struct{}
-var _ = Suite(&ErrorsTestSuite{})
+var _ = check.Suite(&ErrorsTestSuite{})
-func (ts *ErrorsTestSuite) TestWrappingAndMatching(c *C) {
+func (ts *ErrorsTestSuite) TestWrappingAndMatching(c *check.C) {
 	errkitErr := errkit.New("Errkit error")
 	errkitWrappedErr := errkit.Wrap(errkitErr, "errkit wrapped")
 	errorsWrappedErr := errors.Wrap(errkitWrappedErr, "errors wrapped")
 	errorsWrappedErr1 := errors.Wrap(errorsWrappedErr, "errors wrapped 1")
 	// Ensure that errors from 'errkit' wrapped by the older 'errors' package remain matchable.
- c.Assert(errors.Is(errorsWrappedErr, errkitErr), Equals, true) + c.Assert(errors.Is(errorsWrappedErr, errkitErr), check.Equals, true) // Ensure that transformation to string still works - c.Assert(errorsWrappedErr1.Error(), Equals, "errors wrapped 1: errors wrapped: errkit wrapped: Errkit error") + c.Assert(errorsWrappedErr1.Error(), check.Equals, "errors wrapped 1: errors wrapped: errkit wrapped: Errkit error") // Ensure that error message matching does work as expected AssertErrorMessage(c, errorsWrappedErr1, ".*errkit wrapped.*") AssertErrorMessage(c, errorsWrappedErr1, ".*Errkit error") diff --git a/pkg/field/field_test.go b/pkg/field/field_test.go index b8ff3bece2..3c71537ff5 100644 --- a/pkg/field/field_test.go +++ b/pkg/field/field_test.go @@ -4,14 +4,14 @@ import ( "context" "fmt" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/field" ) type FieldSuite struct{} -var _ = Suite(&FieldSuite{}) +var _ = check.Suite(&FieldSuite{}) func ExampleNew() { f := field.New("foo", "bar") diff --git a/pkg/filter/filter_test.go b/pkg/filter/filter_test.go index 2c76020431..c442a8531f 100644 --- a/pkg/filter/filter_test.go +++ b/pkg/filter/filter_test.go @@ -17,20 +17,20 @@ package filter import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type FilterSuite struct{} -var _ = Suite(&FilterSuite{}) +var _ = check.Suite(&FilterSuite{}) -func (s *FilterSuite) TestGVRRequirement(c *C) { +func (s *FilterSuite) TestGVRRequirement(c *check.C) { for _, tc := range []struct { gvrr ResourceTypeRequirement gvr schema.GroupVersionResource @@ -135,20 +135,20 @@ func (s *FilterSuite) TestGVRRequirement(c *C) { expected: false, }, } { - c.Check(tc.gvrr.Matches(tc.gvr), Equals, tc.expected, Commentf("GVRR: %v, GVR: %v", tc.gvrr, tc.gvr)) + c.Check(tc.gvrr.Matches(tc.gvr), check.Equals, tc.expected, check.Commentf("GVRR: %v, GVR: %v", tc.gvrr, tc.gvr)) } } -func (s *FilterSuite) TestGroupVersionResourceEmpty(c *C) { +func (s *FilterSuite) TestGroupVersionResourceEmpty(c *check.C) { var g ResourceTypeMatcher - c.Assert(g.Empty(), Equals, true) + c.Assert(g.Empty(), check.Equals, true) g = ResourceTypeMatcher{} - c.Assert(g.Empty(), Equals, true) + c.Assert(g.Empty(), check.Equals, true) g = ResourceTypeMatcher{ResourceTypeRequirement{}} - c.Assert(g.Empty(), Equals, false) + c.Assert(g.Empty(), check.Equals, false) } -func (s *FilterSuite) TestGroupVersionResourceAnyAll(c *C) { +func (s *FilterSuite) TestGroupVersionResourceAnyAll(c *check.C) { for _, tc := range []struct { g ResourceTypeMatcher gvr schema.GroupVersionResource @@ -211,12 +211,12 @@ func (s *FilterSuite) TestGroupVersionResourceAnyAll(c *C) { all: false, }, } { - c.Check(tc.g.Any(tc.gvr), Equals, tc.any) - c.Check(tc.g.All(tc.gvr), Equals, tc.all) + c.Check(tc.g.Any(tc.gvr), check.Equals, tc.any) + c.Check(tc.g.All(tc.gvr), check.Equals, tc.all) } } -func (s *FilterSuite) TestGroupVersionResourceIncludeExclude(c *C) { +func (s *FilterSuite) TestGroupVersionResourceIncludeExclude(c *check.C) { for _, tc := range []struct { m ResourceTypeMatcher gvrs GroupVersionResourceList @@ -412,12 +412,12 @@ func (s *FilterSuite) TestGroupVersionResourceIncludeExclude(c *C) { }, }, } { - c.Check(tc.gvrs.Include(tc.m), DeepEquals, tc.include) - 
c.Check(tc.gvrs.Exclude(tc.m), DeepEquals, tc.exclude) + c.Check(tc.gvrs.Include(tc.m), check.DeepEquals, tc.include) + c.Check(tc.gvrs.Exclude(tc.m), check.DeepEquals, tc.exclude) } } -func (s *FilterSuite) TestJoin(c *C) { +func (s *FilterSuite) TestJoin(c *check.C) { for _, tc := range []struct { m []ResourceTypeMatcher out ResourceTypeMatcher @@ -431,11 +431,11 @@ func (s *FilterSuite) TestJoin(c *C) { out: ResourceTypeMatcher{}, }, } { - c.Check(JoinResourceTypeMatchers(tc.m...), DeepEquals, tc.out) + c.Check(JoinResourceTypeMatchers(tc.m...), check.DeepEquals, tc.out) } } -func (s *FilterSuite) TestResourceIncludeExclude(c *C) { +func (s *FilterSuite) TestResourceIncludeExclude(c *check.C) { ssTypeRequirement := ResourceTypeRequirement{Group: "apps", Resource: "statefulsets"} pvcTypeRequirement := ResourceTypeRequirement{Version: "v1", Resource: "persistentvolumeclaims"} ss1 := Resource{Name: "ss1", GVR: schema.GroupVersionResource{Group: "apps", Resource: "statefulsets"}, @@ -623,12 +623,12 @@ func (s *FilterSuite) TestResourceIncludeExclude(c *C) { exclude: []Resource{ss1, ss2, pvc1, pvc2}, }, } { - c.Check(tc.resources.Include(tc.m), DeepEquals, tc.include) - c.Check(tc.resources.Exclude(tc.m), DeepEquals, tc.exclude) + c.Check(tc.resources.Include(tc.m), check.DeepEquals, tc.include) + c.Check(tc.resources.Exclude(tc.m), check.DeepEquals, tc.exclude) } } -func (s *FilterSuite) TestResourceRequirementDeepCopyInto(c *C) { +func (s *FilterSuite) TestResourceRequirementDeepCopyInto(c *check.C) { rr := ResourceRequirement{LocalObjectReference: corev1.LocalObjectReference{Name: "specificname"}, ResourceTypeRequirement: ResourceTypeRequirement{Group: "apps", Resource: "statefulsets"}, LabelSelector: metav1.LabelSelector{ @@ -645,12 +645,12 @@ func (s *FilterSuite) TestResourceRequirementDeepCopyInto(c *C) { } var rrCopy ResourceRequirement rr.DeepCopyInto(&rrCopy) - c.Check(rr, DeepEquals, rrCopy) + c.Check(rr, check.DeepEquals, rrCopy) // Change original and check again to be sure is not equals rr.LocalObjectReference.Name = "newval" - c.Check(rr, Not(DeepEquals), rrCopy) + c.Check(rr, check.Not(check.DeepEquals), rrCopy) rr.LocalObjectReference.Name = "specificname" - c.Check(rr, DeepEquals, rrCopy) + c.Check(rr, check.DeepEquals, rrCopy) rr.ResourceTypeRequirement.Group = "newgroup" - c.Check(rr, Not(DeepEquals), rrCopy) + c.Check(rr, check.Not(check.DeepEquals), rrCopy) } diff --git a/pkg/filter/unstructured_test.go b/pkg/filter/unstructured_test.go index e114bdccf5..cf3f5839cc 100644 --- a/pkg/filter/unstructured_test.go +++ b/pkg/filter/unstructured_test.go @@ -15,16 +15,16 @@ package filter import ( - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/apimachinery/pkg/runtime/schema" ) type UnstructuredSuite struct { } -var _ = Suite(&UnstructuredSuite{}) +var _ = check.Suite(&UnstructuredSuite{}) -func (s *UnstructuredSuite) TestIncludeExclude(c *C) { +func (s *UnstructuredSuite) TestIncludeExclude(c *check.C) { for _, tc := range []struct { s Specs gvr ResourceTypeMatcher @@ -76,7 +76,7 @@ func (s *UnstructuredSuite) TestIncludeExclude(c *C) { }, }, } { - c.Check(tc.s.Include(tc.gvr), DeepEquals, tc.include) - c.Check(tc.s.Exclude(tc.gvr), DeepEquals, tc.exclude) + c.Check(tc.s.Include(tc.gvr), check.DeepEquals, tc.include) + c.Check(tc.s.Exclude(tc.gvr), check.DeepEquals, tc.exclude) } } diff --git a/pkg/format/format_test.go b/pkg/format/format_test.go index fd71478a54..d2dd6531ed 100644 --- a/pkg/format/format_test.go +++ b/pkg/format/format_test.go @@ -6,18 +6,18 @@ import ( "strings" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/output" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type FormatTest struct{} -var _ = Suite(&FormatTest{}) +var _ = check.Suite(&FormatTest{}) -func (s *FormatTest) TestLogToForPhaseOutputs(c *C) { +func (s *FormatTest) TestLogToForPhaseOutputs(c *check.C) { const ( pod = "test-pod-logto" container = "test-container-logto" @@ -52,21 +52,21 @@ func (s *FormatTest) TestLogToForPhaseOutputs(c *C) { // create the phase output for each pair of the given k/v kv := &bytes.Buffer{} err := output.PrintOutputTo(kv, key, tc.values[i]) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) kvRaw := fmt.Sprintf("%s\n", kv.String()) if _, err := input.WriteString(kvRaw); err != nil { - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } expected += fmt.Sprintf("%s {\"key\":\"%s\",\"value\":\"%s\"}\n", output.PhaseOpString, key, tc.values[i]) } LogTo(actual, pod, container, input.String()) - c.Check(expected, DeepEquals, actual.String()) + c.Check(expected, check.DeepEquals, actual.String()) } } -func (s *FormatTest) TestLogToForNormalLogs(c *C) { +func (s *FormatTest) TestLogToForNormalLogs(c *check.C) { const ( pod = "test-pod-logto" container = "test-container-logto" @@ -98,7 +98,7 @@ func (s *FormatTest) TestLogToForNormalLogs(c *C) { actual := &bytes.Buffer{} LogTo(actual, pod, container, tc.input) - c.Assert(strings.Contains(actual.String(), tc.expected), Equals, true) - c.Assert(strings.Count(actual.String(), tc.expected), Equals, tc.count) + c.Assert(strings.Contains(actual.String(), tc.expected), check.Equals, true) + c.Assert(strings.Count(actual.String(), tc.expected), check.Equals, tc.count) } } diff --git a/pkg/function/args_test.go b/pkg/function/args_test.go index fc20d9bb68..516d8a7f99 100644 --- a/pkg/function/args_test.go +++ b/pkg/function/args_test.go @@ -15,19 +15,19 @@ package function import ( - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" ) -var _ = Suite(&ArgsTestSuite{}) +var _ = check.Suite(&ArgsTestSuite{}) type ArgsTestSuite struct { } -func (s *ArgsTestSuite) TestGetYamlList(c *C) { +func (s *ArgsTestSuite) TestGetYamlList(c *check.C) { testCases := []struct { name string args map[string]interface{} - errChecker Checker + errChecker check.Checker valList []string }{ { @@ -35,7 +35,7 @@ func (s *ArgsTestSuite) TestGetYamlList(c *C) { args: map[string]interface{}{ "key": "- val1\n- val2\n- val3\n", }, - errChecker: IsNil, + errChecker: check.IsNil, valList: []string{"val1", "val2", "val3"}, }, { @@ -43,7 +43,7 @@ func (s *ArgsTestSuite) TestGetYamlList(c *C) { args: map[string]interface{}{ "key": []string{"test1", "test2", "test3"}, }, - errChecker: IsNil, + errChecker: check.IsNil, valList: []string{"test1", "test2", "test3"}, }, { @@ -51,7 +51,7 @@ func (s *ArgsTestSuite) TestGetYamlList(c *C) { args: map[string]interface{}{ "key": []interface{}{"test1", "test2", "test3"}, }, - errChecker: IsNil, + errChecker: check.IsNil, valList: []string{"test1", "test2", "test3"}, }, { @@ -59,7 +59,7 @@ func (s *ArgsTestSuite) TestGetYamlList(c *C) { args: map[string]interface{}{ "key": "not a slice", }, - errChecker: NotNil, + errChecker: check.NotNil, valList: nil, }, { @@ -67,14 +67,14 @@ func (s *ArgsTestSuite) TestGetYamlList(c *C) { args: map[string]interface{}{ "invalid": nil, }, - errChecker: IsNil, + errChecker: check.IsNil, valList: nil, }, } for _, tc := range testCases { valList, err := GetYamlList(tc.args, "key") - c.Check(err, tc.errChecker, Commentf("Test: %s Failed!", tc.name)) - c.Check(valList, DeepEquals, tc.valList, Commentf("Test: %s Failed!", tc.name)) + c.Check(err, tc.errChecker, check.Commentf("Test: %s Failed!", tc.name)) + c.Check(valList, check.DeepEquals, tc.valList, check.Commentf("Test: %s Failed!", tc.name)) } } diff --git a/pkg/function/create_csi_snapshot_static_test.go b/pkg/function/create_csi_snapshot_static_test.go index 7e0a68be6d..53ccd0a919 100644 --- a/pkg/function/create_csi_snapshot_static_test.go +++ b/pkg/function/create_csi_snapshot_static_test.go @@ -19,7 +19,7 @@ import ( "fmt" "strings" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -32,11 +32,11 @@ import ( type CreateCSISnapshotStaticTestSuite struct{} -var _ = Suite(&CreateCSISnapshotStaticTestSuite{}) +var _ = check.Suite(&CreateCSISnapshotStaticTestSuite{}) -func (testSuite *CreateCSISnapshotStaticTestSuite) SetUpSuite(c *C) {} +func (testSuite *CreateCSISnapshotStaticTestSuite) SetUpSuite(c *check.C) {} -func (testSuite *CreateCSISnapshotStaticTestSuite) TestCreateCSISnapshotStatic(c *C) { +func (testSuite *CreateCSISnapshotStaticTestSuite) TestCreateCSISnapshotStatic(c *check.C) { const ( snapshotName = "test-snapshot" namespace = "test-namespace" @@ -76,7 +76,7 @@ func (testSuite *CreateCSISnapshotStaticTestSuite) TestCreateCSISnapshotStatic(c scheme := runtime.NewScheme() dynCli := dynfake.NewSimpleDynamicClient(scheme) fakeSnapshotter, err := snapshot.NewSnapshotter(fakeCli, dynCli) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) namespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -84,7 +84,7 @@ func (testSuite *CreateCSISnapshotStaticTestSuite) TestCreateCSISnapshotStatic(c }, } _, err = fakeCli.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) gv := strings.Split(api.GroupVersion, "/") gvr := schema.GroupVersionResource{ @@ -100,7 +100,7 @@ func (testSuite *CreateCSISnapshotStaticTestSuite) TestCreateCSISnapshotStatic(c deletionPolicy, nil) _, err = dynCli.Resource(gvr).Create(ctx, snapshotClass, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = createCSISnapshotStatic( ctx, @@ -110,9 +110,9 @@ func (testSuite *CreateCSISnapshotStaticTestSuite) TestCreateCSISnapshotStatic(c driver, snapshotHandle, snapshotClass.GetName(), false) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = fakeCli.CoreV1().Namespaces().Delete(ctx, namespace.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } diff --git a/pkg/function/create_csi_snapshot_test.go b/pkg/function/create_csi_snapshot_test.go index 8bcfb101ac..e07511c7db 100644 --- a/pkg/function/create_csi_snapshot_test.go +++ b/pkg/function/create_csi_snapshot_test.go @@ -17,7 +17,7 @@ package function import ( "context" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -49,9 +49,9 @@ type CreateCSISnapshotTestSuite struct { storageClass string } -var _ = Suite(&CreateCSISnapshotTestSuite{}) +var _ = check.Suite(&CreateCSISnapshotTestSuite{}) -func (testSuite *CreateCSISnapshotTestSuite) SetUpSuite(c *C) { +func (testSuite *CreateCSISnapshotTestSuite) SetUpSuite(c *check.C) { testSuite.volumeSnapshotClass = snapshotClass testSuite.storageClass = storageClass testSuite.pvcName = pvcName @@ -59,7 +59,7 @@ func (testSuite *CreateCSISnapshotTestSuite) SetUpSuite(c *C) { testSuite.namespace = testCreateNamespace } -func (testSuite *CreateCSISnapshotTestSuite) TestCreateCSISnapshot(c *C) { +func (testSuite *CreateCSISnapshotTestSuite) TestCreateCSISnapshot(c *check.C) { for _, apiResourceList := range []*metav1.APIResourceList{ { TypeMeta: metav1.TypeMeta{ @@ -88,20 +88,20 @@ func (testSuite *CreateCSISnapshotTestSuite) TestCreateCSISnapshot(c *C) { fakeCli.Resources = []*metav1.APIResourceList{apiResourceList} _, err := fakeCli.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testSuite.namespace}}, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) scheme := runtime.NewScheme() fakeSnapshotter, err := snapshot.NewSnapshotter(fakeCli, dynfake.NewSimpleDynamicClient(scheme)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = fakeCli.CoreV1().PersistentVolumeClaims(testSuite.namespace).Create(ctx, getPVCManifest(testSuite.pvcName, testSuite.storageClass), metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = createCSISnapshot(ctx, fakeSnapshotter, testSuite.snapName, testSuite.namespace, testSuite.pvcName, testSuite.volumeSnapshotClass, false, nil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = fakeCli.CoreV1().Namespaces().Delete(ctx, testSuite.namespace, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } diff --git a/pkg/function/create_volume_from_snapshot_test.go b/pkg/function/create_volume_from_snapshot_test.go index 9bd184e34d..783f801eb8 100644 --- a/pkg/function/create_volume_from_snapshot_test.go +++ b/pkg/function/create_volume_from_snapshot_test.go @@ -19,7 +19,7 @@ import ( "encoding/json" "fmt" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -37,9 +37,9 @@ import ( type CreateVolumeFromSnapshotTestSuite struct{} -var _ = Suite(&CreateVolumeFromSnapshotTestSuite{}) +var _ = check.Suite(&CreateVolumeFromSnapshotTestSuite{}) -func (s *CreateVolumeFromSnapshotTestSuite) TestCreateVolumeFromSnapshot(c *C) { +func (s *CreateVolumeFromSnapshotTestSuite) TestCreateVolumeFromSnapshot(c *check.C) { ctx := context.Background() ns := "ns" mockGetter := mockblockstorage.NewGetter() @@ -86,58 +86,58 @@ func (s *CreateVolumeFromSnapshotTestSuite) TestCreateVolumeFromSnapshot(c *C) { PVCData1 = append(PVCData1, volInfo1) PVCData1 = append(PVCData1, volInfo2) info, err := json.Marshal(PVCData1) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) snapinfo := string(info) for _, tc := range []struct { snapshotinfo string - check Checker + check check.Checker newPVCs []string }{ { snapshotinfo: snapinfo, - check: IsNil, + check: check.IsNil, newPVCs: nil, }, { snapshotinfo: snapinfo, - check: IsNil, + check: check.IsNil, newPVCs: []string{"newpvc-1", "newpvc-2"}, }, } { providerList, err := createVolumeFromSnapshot(ctx, cli, ns, tc.snapshotinfo, tc.newPVCs, profile, mockGetter) - c.Assert(providerList, Not(Equals), tc.check) + c.Assert(providerList, check.Not(check.Equals), tc.check) c.Assert(err, tc.check) if err != nil { continue } - c.Assert(len(providerList) == 2, Equals, true) + c.Assert(len(providerList) == 2, check.Equals, true) provider, ok := providerList["pvc-1"] - c.Assert(ok, Equals, true) - c.Assert(len(provider.(*mockblockstorage.Provider).SnapIDList) == 1, Equals, true) - c.Assert(mockblockstorage.CheckID("snap-1", provider.(*mockblockstorage.Provider).SnapIDList), Equals, true) - c.Assert(len(provider.(*mockblockstorage.Provider).VolIDList) == 1, Equals, true) + c.Assert(ok, check.Equals, true) + c.Assert(len(provider.(*mockblockstorage.Provider).SnapIDList) == 1, check.Equals, true) + c.Assert(mockblockstorage.CheckID("snap-1", provider.(*mockblockstorage.Provider).SnapIDList), check.Equals, true) + c.Assert(len(provider.(*mockblockstorage.Provider).VolIDList) == 1, check.Equals, true) provider, ok = providerList["pvc-2"] - c.Assert(ok, Equals, true) - c.Assert(len(provider.(*mockblockstorage.Provider).SnapIDList) == 1, Equals, true) - c.Assert(mockblockstorage.CheckID("snap-2", provider.(*mockblockstorage.Provider).SnapIDList), Equals, true) - c.Assert(len(provider.(*mockblockstorage.Provider).VolIDList) == 1, Equals, true) + c.Assert(ok, check.Equals, true) + c.Assert(len(provider.(*mockblockstorage.Provider).SnapIDList) == 1, check.Equals, true) + c.Assert(mockblockstorage.CheckID("snap-2", provider.(*mockblockstorage.Provider).SnapIDList), check.Equals, true) + c.Assert(len(provider.(*mockblockstorage.Provider).VolIDList) == 1, check.Equals, true) if tc.newPVCs != nil { _, err = cli.CoreV1().PersistentVolumeClaims(ns).Get(ctx, "newpvc-1", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = cli.CoreV1().PersistentVolumeClaims(ns).Get(ctx, "newpvc-2", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } else { _, err = cli.CoreV1().PersistentVolumeClaims(ns).Get(ctx, "pvc-1", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = cli.CoreV1().PersistentVolumeClaims(ns).Get(ctx, "pvc-2", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } } -func (s 
*CreateVolumeFromSnapshotTestSuite) TestAddPVProvisionedByAnnotation(c *C) { +func (s *CreateVolumeFromSnapshotTestSuite) TestAddPVProvisionedByAnnotation(c *check.C) { for _, tc := range []struct { st blockstorage.Provider annotations map[string]string @@ -181,6 +181,6 @@ func (s *CreateVolumeFromSnapshotTestSuite) TestAddPVProvisionedByAnnotation(c * }, } { op := addPVProvisionedByAnnotation(tc.annotations, tc.st) - c.Assert(op, DeepEquals, tc.expectedAnnotations) + c.Assert(op, check.DeepEquals, tc.expectedAnnotations) } } diff --git a/pkg/function/create_volume_snapshot_test.go b/pkg/function/create_volume_snapshot_test.go index 45c9f39456..28e02f2520 100644 --- a/pkg/function/create_volume_snapshot_test.go +++ b/pkg/function/create_volume_snapshot_test.go @@ -17,7 +17,7 @@ package function import ( "context" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" k8sresource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,9 +32,9 @@ import ( type CreateVolumeSnapshotTestSuite struct{} -var _ = Suite(&CreateVolumeSnapshotTestSuite{}) +var _ = check.Suite(&CreateVolumeSnapshotTestSuite{}) -func (s *CreateVolumeSnapshotTestSuite) TestGetPVCInfo(c *C) { +func (s *CreateVolumeSnapshotTestSuite) TestGetPVCInfo(c *check.C) { ctx := context.Background() ns := "ns" mockGetter := mockblockstorage.NewGetter() @@ -130,9 +130,9 @@ func (s *CreateVolumeSnapshotTestSuite) TestGetPVCInfo(c *C) { }, ) _, err := cli.CoreV1().PersistentVolumeClaims(ns).Get(ctx, "pvc-test-1", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = cli.CoreV1().PersistentVolumes().Get(ctx, "pv-test-1", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, tc := range []struct { pvc string @@ -142,7 +142,7 @@ func (s *CreateVolumeSnapshotTestSuite) TestGetPVCInfo(c *C) { wantPVC string wantSize int64 wantRegion string - check Checker + check check.Checker }{ { pvc: "pvc-test-1", @@ -152,28 +152,28 @@ func (s *CreateVolumeSnapshotTestSuite) TestGetPVCInfo(c *C) { wantPVC: "pvc-test-1", wantSize: int64(1073741824), wantRegion: "us-west-2", - check: IsNil, + check: check.IsNil, }, { pvc: "pvc-test-2", - check: NotNil, + check: check.NotNil, }, { pvc: "pvc-test-3", - check: NotNil, + check: check.NotNil, }, } { volInfo, err := getPVCInfo(ctx, cli, ns, tc.pvc, tp, mockGetter) c.Assert(err, tc.check) - c.Assert(volInfo, Not(Equals), tc.check) + c.Assert(volInfo, check.Not(check.Equals), tc.check) if err != nil { continue } - c.Assert(volInfo.volumeID, Equals, tc.wantVolumeID) - c.Assert(volInfo.sType, Equals, tc.wantType) - c.Assert(volInfo.volZone, Equals, tc.wantVolZone) - c.Assert(volInfo.pvc, Equals, tc.wantPVC) - c.Assert(volInfo.size, Equals, tc.wantSize) - c.Assert(volInfo.region, Equals, tc.wantRegion) + c.Assert(volInfo.volumeID, check.Equals, tc.wantVolumeID) + c.Assert(volInfo.sType, check.Equals, tc.wantType) + c.Assert(volInfo.volZone, check.Equals, tc.wantVolZone) + c.Assert(volInfo.pvc, check.Equals, tc.wantPVC) + c.Assert(volInfo.size, check.Equals, tc.wantSize) + c.Assert(volInfo.region, check.Equals, tc.wantRegion) } } diff --git a/pkg/function/data_test.go b/pkg/function/data_test.go index ffd3b73d9d..38176eeb10 100644 --- a/pkg/function/data_test.go +++ b/pkg/function/data_test.go @@ -21,7 +21,7 @@ import ( "os" osversioned "github.com/openshift/client-go/apps/clientset/versioned" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/dynamic/fake" @@ -52,22 +52,22 @@ const ( testBucketName = "kio-store-tests" ) -var _ = Suite(&DataSuite{providerType: objectstore.ProviderTypeS3}) -var _ = Suite(&DataSuite{providerType: objectstore.ProviderTypeGCS}) +var _ = check.Suite(&DataSuite{providerType: objectstore.ProviderTypeS3}) +var _ = check.Suite(&DataSuite{providerType: objectstore.ProviderTypeGCS}) -func (s *DataSuite) SetUpSuite(c *C) { +func (s *DataSuite) SetUpSuite(c *check.C) { config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := kubernetes.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) crCli, err := versioned.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) osCli, err := osversioned.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Make sure the CRD's exist. err = resource.CreateCustomResources(context.Background(), config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli s.crCli = crCli @@ -78,16 +78,16 @@ func (s *DataSuite) SetUpSuite(c *C) { ns.GenerateName = "kanister-datatest-" cns, err := s.cli.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.GetName() sec := testutil.NewTestProfileSecret() sec, err = s.cli.CoreV1().Secrets(s.namespace).Create(ctx, sec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p := testutil.NewTestProfile(s.namespace, sec.GetName()) _, err = s.crCli.CrV1alpha1().Profiles(s.namespace).Create(ctx, p, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) var location crv1alpha1.Location switch s.providerType { @@ -107,16 +107,16 @@ func (s *DataSuite) SetUpSuite(c *C) { s.profile = testutil.ObjectStoreProfileOrSkip(c, s.providerType, location) err = os.Setenv("POD_NAMESPACE", s.namespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = os.Setenv("POD_SERVICE_ACCOUNT", "default") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *DataSuite) TearDownSuite(c *C) { +func (s *DataSuite) TearDownSuite(c *check.C) { ctx := context.Background() if s.profile != nil { err := location.Delete(ctx, *s.profile, "") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(ctx, s.namespace, metav1.DeleteOptions{}) @@ -137,7 +137,7 @@ func newRestoreDataBlueprint(pvc, identifierArg, identifierVal string) *crv1alph Func: RestoreDataFuncName, Args: map[string]interface{}{ RestoreDataNamespaceArg: "{{ .StatefulSet.Namespace }}", - RestoreDataImageArg: "ghcr.io/kanisterio/kanister-tools:0.110.0", + RestoreDataImageArg: "ghcr.io/kanisterio/kanister-tools:0.111.0", RestoreDataBackupArtifactPrefixArg: "{{ .Profile.Location.Bucket }}/{{ .Profile.Location.Prefix }}", RestoreDataRestorePathArg: "/mnt/data", RestoreDataEncryptionKeyArg: "{{ .Secrets.backupKey.Data.password | toString }}", @@ -249,7 +249,7 @@ func newRestoreDataAllBlueprint() *crv1alpha1.Blueprint { Func: RestoreDataAllFuncName, Args: map[string]interface{}{ RestoreDataAllNamespaceArg: "{{ .StatefulSet.Namespace }}", - RestoreDataAllImageArg: "ghcr.io/kanisterio/kanister-tools:0.110.0", + RestoreDataAllImageArg: "ghcr.io/kanisterio/kanister-tools:0.111.0", RestoreDataAllBackupArtifactPrefixArg: "{{ .Profile.Location.Bucket }}/{{ .Profile.Location.Prefix }}", 
RestoreDataAllBackupInfo: fmt.Sprintf("{{ .Options.%s }}", BackupDataAllOutput), RestoreDataAllRestorePathArg: "/mnt/data", @@ -283,18 +283,18 @@ func newDeleteDataAllBlueprint() *crv1alpha1.Blueprint { } } -func (s *DataSuite) getTemplateParamsAndPVCName(c *C, replicas int32) (*param.TemplateParams, []string) { +func (s *DataSuite) getTemplateParamsAndPVCName(c *check.C, replicas int32) (*param.TemplateParams, []string) { ctx := context.Background() ss, err := s.cli.AppsV1().StatefulSets(s.namespace).Create(context.TODO(), testutil.NewTestStatefulSet(replicas), metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnStatefulSetReady(ctx, s.cli, ss.GetNamespace(), ss.GetName()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pvcs := []string{} var i int32 for i = 0; i < replicas; i++ { pvc := testutil.NewTestPVC() pvc, err = s.cli.CoreV1().PersistentVolumeClaims(s.namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pvcs = append(pvcs, pvc.GetName()) } @@ -309,7 +309,7 @@ func (s *DataSuite) getTemplateParamsAndPVCName(c *C, replicas int32) (*param.Te }, } secret, err = s.cli.CoreV1().Secrets(s.namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) as := crv1alpha1.ActionSpec{ Object: crv1alpha1.ObjectReference{ @@ -331,24 +331,24 @@ func (s *DataSuite) getTemplateParamsAndPVCName(c *C, replicas int32) (*param.Te } tp, err := param.New(ctx, s.cli, fake.NewSimpleDynamicClient(k8sscheme.Scheme, ss), s.crCli, s.osCli, as) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) tp.Profile = s.profile return tp, pvcs } -func (s *DataSuite) TestBackupRestoreDeleteData(c *C) { +func (s *DataSuite) TestBackupRestoreDeleteData(c *check.C) { tp, pvcs := s.getTemplateParamsAndPVCName(c, 1) for _, pvc := range pvcs { // Test backup bp := *newBackupDataBlueprint() out := runAction(c, bp, "backup", tp) - c.Assert(out[BackupDataOutputBackupID].(string), Not(Equals), "") - c.Assert(out[BackupDataOutputBackupTag].(string), Not(Equals), "") - c.Check(out[BackupDataStatsOutputFileCount].(string), Not(Equals), "") - c.Check(out[BackupDataStatsOutputSize].(string), Not(Equals), "") - c.Assert(out[FunctionOutputVersion].(string), Equals, kanister.DefaultVersion) + c.Assert(out[BackupDataOutputBackupID].(string), check.Not(check.Equals), "") + c.Assert(out[BackupDataOutputBackupTag].(string), check.Not(check.Equals), "") + c.Check(out[BackupDataStatsOutputFileCount].(string), check.Not(check.Equals), "") + c.Check(out[BackupDataStatsOutputSize].(string), check.Not(check.Equals), "") + c.Assert(out[FunctionOutputVersion].(string), check.Equals, kanister.DefaultVersion) options := map[string]string{ BackupDataOutputBackupID: out[BackupDataOutputBackupID].(string), @@ -365,17 +365,17 @@ func (s *DataSuite) TestBackupRestoreDeleteData(c *C) { } } -func (s *DataSuite) TestBackupRestoreDataWithSnapshotID(c *C) { +func (s *DataSuite) TestBackupRestoreDataWithSnapshotID(c *check.C) { tp, pvcs := s.getTemplateParamsAndPVCName(c, 1) for _, pvc := range pvcs { // Test backup bp := *newBackupDataBlueprint() out := runAction(c, bp, "backup", tp) - c.Assert(out[BackupDataOutputBackupID].(string), Not(Equals), "") - c.Assert(out[BackupDataOutputBackupTag].(string), Not(Equals), "") - c.Check(out[BackupDataStatsOutputFileCount].(string), Not(Equals), "") - c.Check(out[BackupDataStatsOutputSize].(string), Not(Equals), "") - c.Assert(out[FunctionOutputVersion].(string), 
Equals, kanister.DefaultVersion) + c.Assert(out[BackupDataOutputBackupID].(string), check.Not(check.Equals), "") + c.Assert(out[BackupDataOutputBackupTag].(string), check.Not(check.Equals), "") + c.Check(out[BackupDataStatsOutputFileCount].(string), check.Not(check.Equals), "") + c.Check(out[BackupDataStatsOutputSize].(string), check.Not(check.Equals), "") + c.Assert(out[FunctionOutputVersion].(string), check.Equals, kanister.DefaultVersion) options := map[string]string{ BackupDataOutputBackupID: out[BackupDataOutputBackupID].(string), @@ -389,21 +389,21 @@ func (s *DataSuite) TestBackupRestoreDataWithSnapshotID(c *C) { } } -func (s *DataSuite) TestBackupRestoreDeleteDataAll(c *C) { +func (s *DataSuite) TestBackupRestoreDeleteDataAll(c *check.C) { replicas := int32(2) tp, pvcs := s.getTemplateParamsAndPVCName(c, replicas) // Test backup bp := *newBackupDataAllBlueprint() out := runAction(c, bp, "backup", tp) - c.Assert(out[BackupDataAllOutput].(string), Not(Equals), "") - c.Assert(out[FunctionOutputVersion].(string), Equals, kanister.DefaultVersion) + c.Assert(out[BackupDataAllOutput].(string), check.Not(check.Equals), "") + c.Assert(out[FunctionOutputVersion].(string), check.Equals, kanister.DefaultVersion) output := make(map[string]BackupInfo) - c.Assert(json.Unmarshal([]byte(out[BackupDataAllOutput].(string)), &output), IsNil) - c.Assert(int32(len(output)), Equals, replicas) + c.Assert(json.Unmarshal([]byte(out[BackupDataAllOutput].(string)), &output), check.IsNil) + c.Assert(int32(len(output)), check.Equals, replicas) for k := range output { - c.Assert(k, Equals, output[k].PodName) + c.Assert(k, check.Equals, output[k].PodName) } options := map[string]string{BackupDataAllOutput: out[BackupDataAllOutput].(string)} tp.Options = options @@ -460,7 +460,7 @@ func newCopyDataTestBlueprint() crv1alpha1.Blueprint { Func: RestoreDataFuncName, Args: map[string]interface{}{ RestoreDataNamespaceArg: "{{ .PVC.Namespace }}", - RestoreDataImageArg: "ghcr.io/kanisterio/kanister-tools:0.110.0", + RestoreDataImageArg: "ghcr.io/kanisterio/kanister-tools:0.111.0", RestoreDataBackupArtifactPrefixArg: fmt.Sprintf("{{ .Options.%s }}", CopyVolumeDataOutputBackupArtifactLocation), RestoreDataBackupTagArg: fmt.Sprintf("{{ .Options.%s }}", CopyVolumeDataOutputBackupTag), RestoreDataVolsArg: map[string]string{ @@ -503,10 +503,10 @@ func newCopyDataTestBlueprint() crv1alpha1.Blueprint { }, } } -func (s *DataSuite) TestCopyData(c *C) { +func (s *DataSuite) TestCopyData(c *check.C) { pvcSpec := testutil.NewTestPVC() pvc, err := s.cli.CoreV1().PersistentVolumeClaims(s.namespace).Create(context.TODO(), pvcSpec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) tp := s.initPVCTemplateParams(c, pvc, nil) bp := newCopyDataTestBlueprint() @@ -516,11 +516,11 @@ func (s *DataSuite) TestCopyData(c *C) { out := runAction(c, bp, "copy", tp) // Validate outputs and setup as inputs for restore - c.Assert(out[CopyVolumeDataOutputBackupID].(string), Not(Equals), "") - c.Assert(out[CopyVolumeDataOutputBackupRoot].(string), Not(Equals), "") - c.Assert(out[CopyVolumeDataOutputBackupArtifactLocation].(string), Not(Equals), "") - c.Assert(out[CopyVolumeDataOutputBackupTag].(string), Not(Equals), "") - c.Assert(out[FunctionOutputVersion].(string), Equals, kanister.DefaultVersion) + c.Assert(out[CopyVolumeDataOutputBackupID].(string), check.Not(check.Equals), "") + c.Assert(out[CopyVolumeDataOutputBackupRoot].(string), check.Not(check.Equals), "") + 
c.Assert(out[CopyVolumeDataOutputBackupArtifactLocation].(string), check.Not(check.Equals), "") + c.Assert(out[CopyVolumeDataOutputBackupTag].(string), check.Not(check.Equals), "") + c.Assert(out[FunctionOutputVersion].(string), check.Equals, kanister.DefaultVersion) options := map[string]string{ CopyVolumeDataOutputBackupID: out[CopyVolumeDataOutputBackupID].(string), CopyVolumeDataOutputBackupRoot: out[CopyVolumeDataOutputBackupRoot].(string), @@ -530,7 +530,7 @@ func (s *DataSuite) TestCopyData(c *C) { // Create a new PVC pvc2, err := s.cli.CoreV1().PersistentVolumeClaims(s.namespace).Create(context.TODO(), pvcSpec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) tp = s.initPVCTemplateParams(c, pvc2, options) // Restore data from copy _ = runAction(c, bp, "restore", tp) @@ -540,13 +540,13 @@ func (s *DataSuite) TestCopyData(c *C) { _ = runAction(c, bp, "delete", tp) } -func runAction(c *C, bp crv1alpha1.Blueprint, action string, tp *param.TemplateParams) map[string]interface{} { +func runAction(c *check.C, bp crv1alpha1.Blueprint, action string, tp *param.TemplateParams) map[string]interface{} { phases, err := kanister.GetPhases(bp, action, kanister.DefaultVersion, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) out := make(map[string]interface{}) for _, p := range phases { o, err := p.Exec(context.Background(), bp, action, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for k, v := range o { out[k] = v } @@ -554,7 +554,7 @@ func runAction(c *C, bp crv1alpha1.Blueprint, action string, tp *param.TemplateP return out } -func (s *DataSuite) initPVCTemplateParams(c *C, pvc *corev1.PersistentVolumeClaim, options map[string]string) *param.TemplateParams { +func (s *DataSuite) initPVCTemplateParams(c *check.C, pvc *corev1.PersistentVolumeClaim, options map[string]string) *param.TemplateParams { as := crv1alpha1.ActionSpec{ Object: crv1alpha1.ObjectReference{ Kind: param.PVCKind, @@ -568,30 +568,30 @@ func (s *DataSuite) initPVCTemplateParams(c *C, pvc *corev1.PersistentVolumeClai Options: options, } tp, err := param.New(context.Background(), s.cli, fake.NewSimpleDynamicClient(k8sscheme.Scheme, pvc), s.crCli, s.osCli, as) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) tp.Profile = s.profile return tp } -func (s *DataSuite) TestCheckRepository(c *C) { +func (s *DataSuite) TestCheckRepository(c *check.C) { tp, _ := s.getTemplateParamsAndPVCName(c, 1) // Test backup bp := *newBackupDataBlueprint() out := runAction(c, bp, "backup", tp) - c.Assert(out[BackupDataOutputBackupID].(string), Not(Equals), "") - c.Assert(out[BackupDataOutputBackupTag].(string), Not(Equals), "") - c.Assert(out[FunctionOutputVersion].(string), Equals, kanister.DefaultVersion) + c.Assert(out[BackupDataOutputBackupID].(string), check.Not(check.Equals), "") + c.Assert(out[BackupDataOutputBackupTag].(string), check.Not(check.Equals), "") + c.Assert(out[FunctionOutputVersion].(string), check.Equals, kanister.DefaultVersion) // Test CheckRepository bp2 := *newCheckRepositoryBlueprint() out2 := runAction(c, bp2, "checkRepository", tp) - c.Assert(out2[CheckRepositoryPasswordIncorrect].(string), Equals, "false") - c.Assert(out2[CheckRepositoryRepoDoesNotExist].(string), Equals, "false") - c.Assert(out2[FunctionOutputVersion].(string), Equals, kanister.DefaultVersion) + c.Assert(out2[CheckRepositoryPasswordIncorrect].(string), check.Equals, "false") + c.Assert(out2[CheckRepositoryRepoDoesNotExist].(string), check.Equals, "false") + c.Assert(out2[FunctionOutputVersion].(string), 
check.Equals, kanister.DefaultVersion) } -func (s *DataSuite) TestCheckRepositoryWrongPassword(c *C) { +func (s *DataSuite) TestCheckRepositoryWrongPassword(c *check.C) { tp, _ := s.getTemplateParamsAndPVCName(c, 1) // Test backup @@ -599,31 +599,31 @@ func (s *DataSuite) TestCheckRepositoryWrongPassword(c *C) { bp.Actions["backup"].Phases[0].Args[BackupDataBackupArtifactPrefixArg] = fmt.Sprintf("%s/%s", bp.Actions["backup"].Phases[0].Args[BackupDataBackupArtifactPrefixArg], "abcdef") bp.Actions["backup"].Phases[0].Args[BackupDataEncryptionKeyArg] = "foobar" out := runAction(c, bp, "backup", tp) - c.Assert(out[BackupDataOutputBackupID].(string), Not(Equals), "") - c.Assert(out[BackupDataOutputBackupTag].(string), Not(Equals), "") - c.Assert(out[FunctionOutputVersion].(string), Equals, kanister.DefaultVersion) + c.Assert(out[BackupDataOutputBackupID].(string), check.Not(check.Equals), "") + c.Assert(out[BackupDataOutputBackupTag].(string), check.Not(check.Equals), "") + c.Assert(out[FunctionOutputVersion].(string), check.Equals, kanister.DefaultVersion) // Test CheckRepository bp2 := *newCheckRepositoryBlueprint() bp2.Actions["checkRepository"].Phases[0].Args[CheckRepositoryArtifactPrefixArg] = fmt.Sprintf("%s/%s", bp2.Actions["checkRepository"].Phases[0].Args[CheckRepositoryArtifactPrefixArg], "abcdef") out2 := runAction(c, bp2, "checkRepository", tp) - c.Assert(out2[CheckRepositoryPasswordIncorrect].(string), Equals, "true") + c.Assert(out2[CheckRepositoryPasswordIncorrect].(string), check.Equals, "true") } -func (s *DataSuite) TestCheckRepositoryRepoNotAvailable(c *C) { +func (s *DataSuite) TestCheckRepositoryRepoNotAvailable(c *check.C) { tp, _ := s.getTemplateParamsAndPVCName(c, 1) // Test backup bp := *newBackupDataBlueprint() out := runAction(c, bp, "backup", tp) - c.Assert(out[BackupDataOutputBackupID].(string), Not(Equals), "") - c.Assert(out[BackupDataOutputBackupTag].(string), Not(Equals), "") - c.Assert(out[FunctionOutputVersion].(string), Equals, kanister.DefaultVersion) + c.Assert(out[BackupDataOutputBackupID].(string), check.Not(check.Equals), "") + c.Assert(out[BackupDataOutputBackupTag].(string), check.Not(check.Equals), "") + c.Assert(out[FunctionOutputVersion].(string), check.Equals, kanister.DefaultVersion) // Test CheckRepository bp2 := *newCheckRepositoryBlueprint() bp2.Actions["checkRepository"].Phases[0].Args[CheckRepositoryArtifactPrefixArg] = fmt.Sprintf("%s/%s", bp2.Actions["checkRepository"].Phases[0].Args[CheckRepositoryArtifactPrefixArg], c.TestName()) out2 := runAction(c, bp2, "checkRepository", tp) - c.Assert(out2[CheckRepositoryRepoDoesNotExist].(string), Equals, "true") - c.Assert(out2[FunctionOutputVersion].(string), Equals, kanister.DefaultVersion) + c.Assert(out2[CheckRepositoryRepoDoesNotExist].(string), check.Equals, "true") + c.Assert(out2[FunctionOutputVersion].(string), check.Equals, kanister.DefaultVersion) } diff --git a/pkg/function/delete_csi_snapshot_content_test.go b/pkg/function/delete_csi_snapshot_content_test.go index 14c9db2d70..07e29e6ad9 100644 --- a/pkg/function/delete_csi_snapshot_content_test.go +++ b/pkg/function/delete_csi_snapshot_content_test.go @@ -19,7 +19,7 @@ import ( "fmt" "strings" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -31,11 +31,11 @@ import ( type DeleteCSISnapshotContentTestSuite struct{} -var _ = Suite(&DeleteCSISnapshotContentTestSuite{}) +var _ = check.Suite(&DeleteCSISnapshotContentTestSuite{}) -func (testSuite *DeleteCSISnapshotContentTestSuite) SetUpSuite(c *C) {} +func (testSuite *DeleteCSISnapshotContentTestSuite) SetUpSuite(c *check.C) {} -func (testSuite *DeleteCSISnapshotContentTestSuite) TestDeleteCSISnapshotContent(c *C) { +func (testSuite *DeleteCSISnapshotContentTestSuite) TestDeleteCSISnapshotContent(c *check.C) { const ( snapshotContentName = "test-delete-snapshot-content" snapshotName = "test-delete-snapshot-name" @@ -75,7 +75,7 @@ func (testSuite *DeleteCSISnapshotContentTestSuite) TestDeleteCSISnapshotContent scheme := runtime.NewScheme() dynCli := dynfake.NewSimpleDynamicClient(scheme) fakeSnapshotter, err := snapshot.NewSnapshotter(fakeCli, dynCli) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) source := &snapshot.Source{ Handle: snapshotNamespace, @@ -87,7 +87,7 @@ func (testSuite *DeleteCSISnapshotContentTestSuite) TestDeleteCSISnapshotContent } err = fakeSnapshotter.CreateContentFromSource(ctx, source, snapshotName, snapshotNamespace, deletionPolicy, fakeSnapshotContentMeta) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) gv := strings.Split(api.GroupVersion, "/") gvr := schema.GroupVersionResource{ @@ -97,12 +97,12 @@ func (testSuite *DeleteCSISnapshotContentTestSuite) TestDeleteCSISnapshotContent } _, err = dynCli.Resource(gvr).Get(ctx, snapshotContentName, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = deleteCSISnapshotContent(ctx, fakeSnapshotter, snapshotContentName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = dynCli.Resource(gvr).Get(ctx, snapshotContentName, metav1.GetOptions{}) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } } diff --git a/pkg/function/delete_csi_snapshot_test.go b/pkg/function/delete_csi_snapshot_test.go index 2814dc6fb2..a93021bbef 100644 --- a/pkg/function/delete_csi_snapshot_test.go +++ b/pkg/function/delete_csi_snapshot_test.go @@ -17,7 +17,7 @@ package function import ( "context" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,9 +42,9 @@ type DeleteCSISnapshotTestSuite struct { storageClass string } -var _ = Suite(&DeleteCSISnapshotTestSuite{}) +var _ = check.Suite(&DeleteCSISnapshotTestSuite{}) -func (testSuite *DeleteCSISnapshotTestSuite) SetUpSuite(c *C) { +func (testSuite *DeleteCSISnapshotTestSuite) SetUpSuite(c *check.C) { testSuite.volumeSnapshotClass = snapshotClass testSuite.storageClass = storageClass testSuite.pvcName = originalPVCName @@ -53,7 +53,7 @@ func (testSuite *DeleteCSISnapshotTestSuite) SetUpSuite(c *C) { testSuite.namespace = testDeleteNamespace } -func (testSuite *DeleteCSISnapshotTestSuite) TestDeleteCSISnapshot(c *C) { +func (testSuite *DeleteCSISnapshotTestSuite) TestDeleteCSISnapshot(c *check.C) { for _, apiResourceList := range []*metav1.APIResourceList{ { TypeMeta: metav1.TypeMeta{ @@ -82,11 +82,11 @@ func (testSuite *DeleteCSISnapshotTestSuite) TestDeleteCSISnapshot(c *C) { fakeCli.Resources = []*metav1.APIResourceList{apiResourceList} _, err := fakeCli.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testSuite.namespace}}, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) scheme := runtime.NewScheme() fakeSnapshotter, err := snapshot.NewSnapshotter(fakeCli, dynfake.NewSimpleDynamicClient(scheme)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) originalPVC := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -103,17 +103,17 @@ func (testSuite *DeleteCSISnapshotTestSuite) TestDeleteCSISnapshot(c *C) { }, } _, err = fakeCli.CoreV1().PersistentVolumeClaims(testSuite.namespace).Create(ctx, originalPVC, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) fakeSnapshotMeta := snapshot.ObjectMeta{ Name: testSuite.snapName, Namespace: testSuite.namespace, } err = fakeSnapshotter.Create(ctx, testSuite.pvcName, &testSuite.volumeSnapshotClass, false, fakeSnapshotMeta) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) vs, err := fakeSnapshotter.Get(ctx, testSuite.snapName, testSuite.namespace) - c.Assert(err, IsNil) - c.Assert(vs.Name, Equals, testSuite.snapName) + c.Assert(err, check.IsNil) + c.Assert(vs.Name, check.Equals, testSuite.snapName) restoreArgs := restoreCSISnapshotArgs{ Name: testSuite.snapName, @@ -126,15 +126,15 @@ func (testSuite *DeleteCSISnapshotTestSuite) TestDeleteCSISnapshot(c *C) { } newPVC := newPVCManifest(restoreArgs) _, err = fakeCli.CoreV1().PersistentVolumeClaims(restoreArgs.Namespace).Create(ctx, newPVC, metav1.CreateOptions{}) - c.Assert(err, IsNil) - c.Assert(newPVC.Name, Equals, testSuite.newPVCName) + c.Assert(err, check.IsNil) + c.Assert(newPVC.Name, check.Equals, testSuite.newPVCName) _, err = deleteCSISnapshot(ctx, fakeSnapshotter, testSuite.snapName, testSuite.namespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = fakeSnapshotter.Get(ctx, testSuite.snapName, testSuite.namespace) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) err = fakeCli.CoreV1().Namespaces().Delete(ctx, testSuite.namespace, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } diff --git a/pkg/function/delete_volume_snapshot_test.go b/pkg/function/delete_volume_snapshot_test.go index f12ea4a369..bdb75b9633 100644 --- a/pkg/function/delete_volume_snapshot_test.go +++ b/pkg/function/delete_volume_snapshot_test.go @@ -18,7 +18,7 @@ import ( "context" "encoding/json" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/client-go/kubernetes/fake" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -29,9 +29,9 @@ import ( type DeleteVolumeSnapshotTestSuite struct{} -var _ = Suite(&DeleteVolumeSnapshotTestSuite{}) +var _ = check.Suite(&DeleteVolumeSnapshotTestSuite{}) -func (s *DeleteVolumeSnapshotTestSuite) TestDeleteVolumeSnapshot(c *C) { +func (s *DeleteVolumeSnapshotTestSuite) TestDeleteVolumeSnapshot(c *check.C) { ctx := context.Background() ns := "ns" mockGetter := mockblockstorage.NewGetter() @@ -59,36 +59,36 @@ func (s *DeleteVolumeSnapshotTestSuite) TestDeleteVolumeSnapshot(c *C) { PVCData1 = append(PVCData1, volInfo1) PVCData1 = append(PVCData1, volInfo2) info, err := json.Marshal(PVCData1) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) snapinfo := string(info) for _, tc := range []struct { snapshotinfo string - check Checker + check check.Checker }{ { snapshotinfo: snapinfo, - check: IsNil, + check: check.IsNil, }, } { providerList, err := deleteVolumeSnapshot(ctx, cli, ns, tc.snapshotinfo, profile, mockGetter) - c.Assert(providerList, Not(Equals), tc.check) + c.Assert(providerList, check.Not(check.Equals), tc.check) c.Assert(err, tc.check) if err != nil { continue } - c.Assert(len(providerList) == 2, Equals, true) + c.Assert(len(providerList) == 2, check.Equals, true) provider, ok := providerList["pvc-1"] - c.Assert(ok, Equals, true) - c.Assert(len(provider.(*mockblockstorage.Provider).SnapIDList) == 1, Equals, true) - c.Assert(mockblockstorage.CheckID("snap-1", provider.(*mockblockstorage.Provider).SnapIDList), Equals, true) - c.Assert(len(provider.(*mockblockstorage.Provider).DeletedSnapIDList) == 1, Equals, true) - c.Assert(mockblockstorage.CheckID("snap-1", provider.(*mockblockstorage.Provider).DeletedSnapIDList), Equals, true) + c.Assert(ok, check.Equals, true) + c.Assert(len(provider.(*mockblockstorage.Provider).SnapIDList) == 1, check.Equals, true) + c.Assert(mockblockstorage.CheckID("snap-1", provider.(*mockblockstorage.Provider).SnapIDList), check.Equals, true) + c.Assert(len(provider.(*mockblockstorage.Provider).DeletedSnapIDList) == 1, check.Equals, true) + c.Assert(mockblockstorage.CheckID("snap-1", provider.(*mockblockstorage.Provider).DeletedSnapIDList), check.Equals, true) provider, ok = providerList["pvc-2"] - c.Assert(ok, Equals, true) - c.Assert(len(provider.(*mockblockstorage.Provider).SnapIDList) == 1, Equals, true) - c.Assert(mockblockstorage.CheckID("snap-2", provider.(*mockblockstorage.Provider).SnapIDList), Equals, true) - c.Assert(len(provider.(*mockblockstorage.Provider).DeletedSnapIDList) == 1, Equals, true) - c.Assert(mockblockstorage.CheckID("snap-2", provider.(*mockblockstorage.Provider).DeletedSnapIDList), Equals, true) + c.Assert(ok, check.Equals, true) + c.Assert(len(provider.(*mockblockstorage.Provider).SnapIDList) == 1, check.Equals, true) + c.Assert(mockblockstorage.CheckID("snap-2", provider.(*mockblockstorage.Provider).SnapIDList), check.Equals, true) + c.Assert(len(provider.(*mockblockstorage.Provider).DeletedSnapIDList) == 1, check.Equals, true) + c.Assert(mockblockstorage.CheckID("snap-2", provider.(*mockblockstorage.Provider).DeletedSnapIDList), check.Equals, true) } } diff --git a/pkg/function/e2e_volume_snapshot_test.go b/pkg/function/e2e_volume_snapshot_test.go index 641cb23918..2d696b48ad 100644 --- a/pkg/function/e2e_volume_snapshot_test.go +++ b/pkg/function/e2e_volume_snapshot_test.go @@ -22,7 +22,7 @@ import ( osversioned 
"github.com/openshift/client-go/apps/clientset/versioned" "golang.org/x/oauth2/google" "google.golang.org/api/compute/v1" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8sresource "k8s.io/apimachinery/pkg/api/resource" @@ -55,21 +55,21 @@ type VolumeSnapshotTestSuite struct { tp *param.TemplateParams } -var _ = Suite(&VolumeSnapshotTestSuite{}) +var _ = check.Suite(&VolumeSnapshotTestSuite{}) -func (s *VolumeSnapshotTestSuite) SetUpTest(c *C) { +func (s *VolumeSnapshotTestSuite) SetUpTest(c *check.C) { config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := kubernetes.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) crCli, err := versioned.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) osCli, err := osversioned.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Make sure the CRD's exist. err = resource.CreateCustomResources(context.Background(), config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli s.crCli = crCli @@ -78,33 +78,33 @@ func (s *VolumeSnapshotTestSuite) SetUpTest(c *C) { ns := testutil.NewTestNamespace() cns, err := s.cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.GetName() ctx := context.Background() ss, err := s.cli.AppsV1().StatefulSets(s.namespace).Create(ctx, newStatefulSet(s.namespace), metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnStatefulSetReady(ctx, s.cli, ss.GetNamespace(), ss.GetName()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pods, _, err := kube.FetchPods(s.cli, s.namespace, ss.UID) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) volToPvc := kube.StatefulSetVolumes(s.cli, ss, &pods[0]) pvc := volToPvc[pods[0].Spec.Containers[0].VolumeMounts[0].Name] - c.Assert(len(pvc) > 0, Equals, true) + c.Assert(len(pvc) > 0, check.Equals, true) id, secret, locationType, err := s.getCreds(c, ctx, s.cli, s.namespace, pvc) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) if id == "" || secret == "" { c.Skip("Skipping the test since storage type not supported") } sec := NewTestProfileSecret(id, secret) sec, err = s.cli.CoreV1().Secrets(s.namespace).Create(ctx, sec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p := NewTestProfile(s.namespace, sec.GetName(), locationType) _, err = s.crCli.CrV1alpha1().Profiles(s.namespace).Create(ctx, p, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) as := crv1alpha1.ActionSpec{ Object: crv1alpha1.ObjectReference{ @@ -119,7 +119,7 @@ func (s *VolumeSnapshotTestSuite) SetUpTest(c *C) { } tp, err := param.New(ctx, s.cli, fake.NewSimpleDynamicClient(k8sscheme.Scheme, ss), s.crCli, s.osCli, as) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.tp = tp } @@ -166,7 +166,7 @@ func NewTestProfile(namespace string, secretName string, locationType crv1alpha1 } } -func (s *VolumeSnapshotTestSuite) TearDownTest(c *C) { +func (s *VolumeSnapshotTestSuite) TearDownTest(c *check.C) { if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } @@ -308,31 +308,31 @@ func newStatefulSet(namespace string) *appsv1.StatefulSet { } } -func (s *VolumeSnapshotTestSuite) TestVolumeSnapshot(c *C) { +func (s *VolumeSnapshotTestSuite) TestVolumeSnapshot(c *check.C) { ctx := context.Background() actions := 
[]string{"backup", "restore", "delete"} bp := newVolumeSnapshotBlueprint() for _, action := range actions { phases, err := kanister.GetPhases(*bp, action, kanister.DefaultVersion, *s.tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { - c.Assert(param.InitPhaseParams(ctx, s.cli, s.tp, p.Name(), p.Objects()), IsNil) + c.Assert(param.InitPhaseParams(ctx, s.cli, s.tp, p.Name(), p.Objects()), check.IsNil) output, err := p.Exec(ctx, *bp, action, *s.tp) if err != nil && strings.Contains(err.Error(), skipTestErrorMsg) { c.Skip("Skipping the test since storage type not supported") } - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) param.UpdatePhaseParams(ctx, s.tp, p.Name(), output) if action == "backup" { arts, err := param.RenderArtifacts(bp.Actions[action].OutputArtifacts, *s.tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.tp.ArtifactsIn = arts } } } } -func (s *VolumeSnapshotTestSuite) getCreds(c *C, ctx context.Context, cli kubernetes.Interface, namespace string, pvcname string) (string, string, crv1alpha1.LocationType, error) { +func (s *VolumeSnapshotTestSuite) getCreds(c *check.C, ctx context.Context, cli kubernetes.Interface, namespace string, pvcname string) (string, string, crv1alpha1.LocationType, error) { pvc, err := cli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcname, metav1.GetOptions{}) if err != nil { return "", "", "", err @@ -358,7 +358,7 @@ func (s *VolumeSnapshotTestSuite) getCreds(c *C, ctx context.Context, cli kubern return "", "", "", nil } -func GetEnvOrSkip(c *C, varName string) string { +func GetEnvOrSkip(c *check.C, varName string) string { v := os.Getenv(varName) // Ensure the variable is set if v == "" { diff --git a/pkg/function/export_rds_snapshot_location.go b/pkg/function/export_rds_snapshot_location.go index 2bf6d341a7..caf4861877 100644 --- a/pkg/function/export_rds_snapshot_location.go +++ b/pkg/function/export_rds_snapshot_location.go @@ -66,7 +66,7 @@ const ( BackupAction RDSAction = "backup" RestoreAction RDSAction = "restore" - defaultPostgresToolsImage = "ghcr.io/kanisterio/postgres-kanister-tools:0.110.0" + defaultPostgresToolsImage = "ghcr.io/kanisterio/postgres-kanister-tools:0.111.0" ) type exportRDSSnapshotToLocationFunc struct { diff --git a/pkg/function/function_test.go b/pkg/function/function_test.go index 959e587e65..bf20cf8a7b 100644 --- a/pkg/function/function_test.go +++ b/pkg/function/function_test.go @@ -17,8 +17,8 @@ package function import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } diff --git a/pkg/function/kube_exec_all_test.go b/pkg/function/kube_exec_all_test.go index 4f398d3fa0..1a5d91452e 100644 --- a/pkg/function/kube_exec_all_test.go +++ b/pkg/function/kube_exec_all_test.go @@ -19,7 +19,7 @@ import ( "fmt" osversioned "github.com/openshift/client-go/apps/clientset/versioned" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/dynamic/fake" @@ -42,21 +42,21 @@ type KubeExecAllTest struct { namespace string } -var _ = Suite(&KubeExecAllTest{}) +var _ = check.Suite(&KubeExecAllTest{}) -func (s *KubeExecAllTest) SetUpSuite(c *C) { +func (s *KubeExecAllTest) SetUpSuite(c *check.C) { config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := kubernetes.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) crCli, err := versioned.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) osCli, err := osversioned.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Make sure the CRD's exist. err = resource.CreateCustomResources(context.Background(), config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli s.crCli = crCli @@ -69,19 +69,19 @@ func (s *KubeExecAllTest) SetUpSuite(c *C) { }, } cns, err := s.cli.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name sec := testutil.NewTestProfileSecret() sec, err = s.cli.CoreV1().Secrets(s.namespace).Create(ctx, sec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p := testutil.NewTestProfile(s.namespace, sec.GetName()) _, err = s.crCli.CrV1alpha1().Profiles(s.namespace).Create(ctx, p, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *KubeExecAllTest) TearDownSuite(c *C) { +func (s *KubeExecAllTest) TearDownSuite(c *check.C) { if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } @@ -109,14 +109,14 @@ func newExecAllBlueprint(kind string) *crv1alpha1.Blueprint { } } -func (s *KubeExecAllTest) TestKubeExecAllDeployment(c *C) { +func (s *KubeExecAllTest) TestKubeExecAllDeployment(c *check.C) { ctx := context.Background() d := testutil.NewTestDeployment(1) d, err := s.cli.AppsV1().Deployments(s.namespace).Create(context.TODO(), d, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnDeploymentReady(ctx, s.cli, d.Namespace, d.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) kind := "Deployment" as := crv1alpha1.ActionSpec{ @@ -131,26 +131,26 @@ func (s *KubeExecAllTest) TestKubeExecAllDeployment(c *C) { }, } tp, err := param.New(ctx, s.cli, fake.NewSimpleDynamicClient(k8sscheme.Scheme, d), s.crCli, s.osCli, as) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) action := "echo" bp := newExecAllBlueprint(kind) phases, err := kanister.GetPhases(*bp, action, kanister.DefaultVersion, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { _, err = p.Exec(ctx, *bp, action, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } -func (s *KubeExecAllTest) TestKubeExecAllStatefulSet(c *C) { +func (s *KubeExecAllTest) TestKubeExecAllStatefulSet(c *check.C) { ctx := context.Background() ss := testutil.NewTestStatefulSet(1) ss, err := s.cli.AppsV1().StatefulSets(s.namespace).Create(context.TODO(), ss, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnStatefulSetReady(ctx, s.cli, ss.Namespace, ss.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) kind := "StatefulSet" as := crv1alpha1.ActionSpec{ @@ -165,14 +165,14 @@ func (s *KubeExecAllTest) TestKubeExecAllStatefulSet(c *C) { }, } tp, err := param.New(ctx, 
s.cli, fake.NewSimpleDynamicClient(k8sscheme.Scheme, ss), s.crCli, s.osCli, as) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) action := "echo" bp := newExecAllBlueprint(kind) phases, err := kanister.GetPhases(*bp, action, kanister.DefaultVersion, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { _, err = p.Exec(ctx, *bp, action, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } diff --git a/pkg/function/kube_exec_test.go b/pkg/function/kube_exec_test.go index cc77b4438f..00ec4beb68 100644 --- a/pkg/function/kube_exec_test.go +++ b/pkg/function/kube_exec_test.go @@ -20,7 +20,7 @@ import ( "strings" osversioned "github.com/openshift/client-go/apps/clientset/versioned" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/dynamic/fake" @@ -43,21 +43,21 @@ type KubeExecTest struct { namespace string } -var _ = Suite(&KubeExecTest{}) +var _ = check.Suite(&KubeExecTest{}) -func (s *KubeExecTest) SetUpSuite(c *C) { +func (s *KubeExecTest) SetUpSuite(c *check.C) { config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := kubernetes.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) crCli, err := versioned.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) osCli, err := osversioned.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Make sure the CRD's exist. err = resource.CreateCustomResources(context.Background(), config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli s.crCli = crCli @@ -70,19 +70,19 @@ func (s *KubeExecTest) SetUpSuite(c *C) { }, } cns, err := s.cli.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name sec := testutil.NewTestProfileSecret() sec, err = s.cli.CoreV1().Secrets(s.namespace).Create(ctx, sec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p := testutil.NewTestProfile(s.namespace, sec.GetName()) _, err = s.crCli.CrV1alpha1().Profiles(s.namespace).Create(ctx, p, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *KubeExecTest) TearDownSuite(c *C) { +func (s *KubeExecTest) TearDownSuite(c *check.C) { if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } @@ -136,15 +136,15 @@ spec: args: ["-f", "/dev/null"] ` -func (s *KubeExecTest) TestKubeExec(c *C) { +func (s *KubeExecTest) TestKubeExec(c *check.C) { ctx := context.Background() name := strings.ToLower(c.TestName()) name = strings.Replace(name, ".", "", 1) spec := fmt.Sprintf(ssSpec, name) ss, err := kube.CreateStatefulSet(ctx, s.cli, s.namespace, spec) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnStatefulSetReady(ctx, s.cli, ss.GetNamespace(), ss.GetName()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) kind := "Statefulset" // Run the delete action. 
@@ -160,38 +160,38 @@ func (s *KubeExecTest) TestKubeExec(c *C) { }, } tp, err := param.New(ctx, s.cli, fake.NewSimpleDynamicClient(k8sscheme.Scheme, ss), s.crCli, s.osCli, as) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) action := "echo" bp := newKubeExecBlueprint() phases, err := kanister.GetPhases(*bp, action, kanister.DefaultVersion, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { _, err = p.Exec(context.Background(), *bp, action, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } -func (s *KubeExecTest) TestParseLogAndCreateOutput(c *C) { +func (s *KubeExecTest) TestParseLogAndCreateOutput(c *check.C) { for _, tc := range []struct { log string expected map[string]interface{} - errChecker Checker - outChecker Checker + errChecker check.Checker + outChecker check.Checker }{ - {"###Phase-output###: {\"key\":\"version\",\"value\":\"0.110.0\"}", map[string]interface{}{"version": "0.110.0"}, IsNil, NotNil}, + {"###Phase-output###: {\"key\":\"version\",\"value\":\"0.110.0\"}", map[string]interface{}{"version": "0.110.0"}, check.IsNil, check.NotNil}, {"###Phase-output###: {\"key\":\"version\",\"value\":\"0.110.0\"}\n###Phase-output###: {\"key\":\"path\",\"value\":\"/backup/path\"}", - map[string]interface{}{"version": "0.110.0", "path": "/backup/path"}, IsNil, NotNil}, - {"Random message ###Phase-output###: {\"key\":\"version\",\"value\":\"0.110.0\"}", map[string]interface{}{"version": "0.110.0"}, IsNil, NotNil}, - {"Random message with newline \n###Phase-output###: {\"key\":\"version\",\"value\":\"0.110.0\"}", map[string]interface{}{"version": "0.110.0"}, IsNil, NotNil}, - {"###Phase-output###: Invalid message", nil, NotNil, IsNil}, - {"Random message", nil, IsNil, IsNil}, + map[string]interface{}{"version": "0.110.0", "path": "/backup/path"}, check.IsNil, check.NotNil}, + {"Random message ###Phase-output###: {\"key\":\"version\",\"value\":\"0.110.0\"}", map[string]interface{}{"version": "0.110.0"}, check.IsNil, check.NotNil}, + {"Random message with newline \n###Phase-output###: {\"key\":\"version\",\"value\":\"0.110.0\"}", map[string]interface{}{"version": "0.110.0"}, check.IsNil, check.NotNil}, + {"###Phase-output###: Invalid message", nil, check.NotNil, check.IsNil}, + {"Random message", nil, check.IsNil, check.IsNil}, } { out, err := parseLogAndCreateOutput(tc.log) c.Check(err, tc.errChecker) c.Check(out, tc.outChecker) if out != nil { - c.Check(out, DeepEquals, tc.expected) + c.Check(out, check.DeepEquals, tc.expected) } } } diff --git a/pkg/function/kube_task_test.go b/pkg/function/kube_task_test.go index 92baaf0b49..a10ca0535f 100644 --- a/pkg/function/kube_task_test.go +++ b/pkg/function/kube_task_test.go @@ -21,7 +21,7 @@ import ( "strings" "time" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -33,16 +33,16 @@ import ( "github.com/kanisterio/kanister/pkg/param" ) -var _ = Suite(&KubeTaskSuite{}) +var _ = check.Suite(&KubeTaskSuite{}) type KubeTaskSuite struct { cli kubernetes.Interface namespace string } -func (s *KubeTaskSuite) SetUpSuite(c *C) { +func (s *KubeTaskSuite) SetUpSuite(c *check.C) { cli, err := kube.NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli ns := &corev1.Namespace{ @@ -51,15 +51,15 @@ func (s *KubeTaskSuite) SetUpSuite(c *C) { }, } cns, err := s.cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name err = os.Setenv("POD_NAMESPACE", cns.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = os.Setenv("POD_SERVICE_ACCOUNT", "default") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *KubeTaskSuite) TearDownSuite(c *C) { +func (s *KubeTaskSuite) TearDownSuite(c *check.C) { if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } @@ -93,7 +93,7 @@ func outputPhase(namespace string) crv1alpha1.BlueprintPhase { KubeTaskCommandArg: []string{ "sh", "-c", - "kando output version 0.110.0", + "kando output version 0.111.0", }, }, } @@ -141,7 +141,7 @@ func newTaskBlueprint(phases ...crv1alpha1.BlueprintPhase) *crv1alpha1.Blueprint } } -func (s *KubeTaskSuite) TestKubeTask(c *C) { +func (s *KubeTaskSuite) TestKubeTask(c *check.C) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() tp := param.TemplateParams{ @@ -166,7 +166,7 @@ func (s *KubeTaskSuite) TestKubeTask(c *C) { bp: newTaskBlueprint(outputPhase(s.namespace), sleepPhase(s.namespace), tickPhase(s.namespace)), outs: []map[string]interface{}{ { - "version": "0.110.0", + "version": "0.111.0", }, {}, {}, @@ -174,17 +174,17 @@ func (s *KubeTaskSuite) TestKubeTask(c *C) { }, } { phases, err := kanister.GetPhases(*tc.bp, action, kanister.DefaultVersion, tp) - c.Assert(err, IsNil) - c.Assert(phases, HasLen, len(tc.outs)) + c.Assert(err, check.IsNil) + c.Assert(phases, check.HasLen, len(tc.outs)) for i, p := range phases { out, err := p.Exec(ctx, *tc.bp, action, tp) - c.Assert(err, IsNil, Commentf("Phase %s failed", p.Name())) - c.Assert(out, DeepEquals, tc.outs[i]) + c.Assert(err, check.IsNil, check.Commentf("Phase %s failed", p.Name())) + c.Assert(out, check.DeepEquals, tc.outs[i]) } } } -func (s *KubeTaskSuite) TestKubeTaskWithBigOutput(c *C) { +func (s *KubeTaskSuite) TestKubeTaskWithBigOutput(c *check.C) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() tp := param.TemplateParams{ @@ -216,12 +216,12 @@ func (s *KubeTaskSuite) TestKubeTaskWithBigOutput(c *C) { }, } { phases, err := kanister.GetPhases(*tc.bp, action, kanister.DefaultVersion, tp) - c.Assert(err, IsNil) - c.Assert(phases, HasLen, len(tc.outs)) + c.Assert(err, check.IsNil) + c.Assert(phases, check.HasLen, len(tc.outs)) for i, p := range phases { out, err := p.Exec(ctx, *tc.bp, action, tp) - c.Assert(err, IsNil, Commentf("Phase %s failed", p.Name())) - c.Assert(out, DeepEquals, tc.outs[i]) + c.Assert(err, check.IsNil, check.Commentf("Phase %s failed", p.Name())) + c.Assert(out, check.DeepEquals, tc.outs[i]) } } } diff --git a/pkg/function/kubeops_test.go b/pkg/function/kubeops_test.go index 30ef45abe5..02d3a4233d 100644 --- 
a/pkg/function/kubeops_test.go +++ b/pkg/function/kubeops_test.go @@ -19,7 +19,7 @@ import ( "fmt" "time" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" extensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" crdclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -87,7 +87,7 @@ spec: testServiceName = "test-service" ) -var _ = Suite(&KubeOpsSuite{}) +var _ = check.Suite(&KubeOpsSuite{}) type KubeOpsSuite struct { kubeCli kubernetes.Interface @@ -96,13 +96,13 @@ type KubeOpsSuite struct { namespace string } -func (s *KubeOpsSuite) SetUpSuite(c *C) { +func (s *KubeOpsSuite) SetUpSuite(c *check.C) { cli, err := kube.NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.kubeCli = cli dynCli, err := kube.NewDynamicClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.dynCli = dynCli ns := &corev1.Namespace{ @@ -111,20 +111,20 @@ func (s *KubeOpsSuite) SetUpSuite(c *C) { }, } cns, err := s.kubeCli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name // Create CRD crdCli, err := kube.NewCRDClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.crdCli = crdCli _, err = s.crdCli.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), getSampleCRD(), metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { return } - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *KubeOpsSuite) TearDownSuite(c *C) { +func (s *KubeOpsSuite) TearDownSuite(c *check.C) { if s.namespace != "" { _ = s.kubeCli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } @@ -182,7 +182,7 @@ func newCreateResourceBlueprint(phases ...crv1alpha1.BlueprintPhase) crv1alpha1. 
} } -func (s *KubeOpsSuite) TestKubeOps(c *C) { +func (s *KubeOpsSuite) TestKubeOps(c *check.C) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() tp := param.TemplateParams{} @@ -215,12 +215,12 @@ func (s *KubeOpsSuite) TestKubeOps(c *C) { } { bp := newCreateResourceBlueprint(createInSpecsNsPhase(tc.spec, tc.name, s.namespace)) phases, err := kanister.GetPhases(bp, action, kanister.DefaultVersion, tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { out, err := p.Exec(ctx, bp, action, tp) - c.Assert(err, IsNil, Commentf("Phase %s failed", p.Name())) + c.Assert(err, check.IsNil, check.Commentf("Phase %s failed", p.Name())) _, err = s.dynCli.Resource(tc.expResource.gvr).Namespace(tc.expResource.namespace).Get(context.TODO(), tc.name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) expOut := map[string]interface{}{ "apiVersion": tc.expResource.gvr.Version, "group": tc.expResource.gvr.Group, @@ -229,14 +229,14 @@ func (s *KubeOpsSuite) TestKubeOps(c *C) { "name": tc.name, "namespace": tc.expResource.namespace, } - c.Assert(out, DeepEquals, expOut) + c.Assert(out, check.DeepEquals, expOut) err = s.dynCli.Resource(tc.expResource.gvr).Namespace(s.namespace).Delete(ctx, tc.name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } } -func (s *KubeOpsSuite) TestKubeOpsCreateDeleteWithCoreResource(c *C) { +func (s *KubeOpsSuite) TestKubeOpsCreateDeleteWithCoreResource(c *check.C) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() tp := param.TemplateParams{} @@ -248,17 +248,17 @@ func (s *KubeOpsSuite) TestKubeOpsCreateDeleteWithCoreResource(c *C) { bp := newCreateResourceBlueprint(createPhase(s.namespace, spec), deletePhase(gvr, serviceName, s.namespace)) phases, err := kanister.GetPhases(bp, action, kanister.DefaultVersion, tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { out, err := p.Exec(ctx, bp, action, tp) - c.Assert(err, IsNil, Commentf("Phase %s failed", p.Name())) + c.Assert(err, check.IsNil, check.Commentf("Phase %s failed", p.Name())) _, err = s.dynCli.Resource(gvr).Namespace(s.namespace).Get(ctx, serviceName, metav1.GetOptions{}) if p.Name() == "deleteDeploy" { - c.Assert(err, NotNil) - c.Assert(apierrors.IsNotFound(err), Equals, true) + c.Assert(err, check.NotNil) + c.Assert(apierrors.IsNotFound(err), check.Equals, true) } else { - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } expOut := map[string]interface{}{ @@ -269,11 +269,11 @@ func (s *KubeOpsSuite) TestKubeOpsCreateDeleteWithCoreResource(c *C) { "name": serviceName, "namespace": s.namespace, } - c.Assert(out, DeepEquals, expOut) + c.Assert(out, check.DeepEquals, expOut) } } -func (s *KubeOpsSuite) TestKubeOpsCreateWaitDelete(c *C) { +func (s *KubeOpsSuite) TestKubeOpsCreateWaitDelete(c *check.C) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() tp := param.TemplateParams{ @@ -287,17 +287,17 @@ func (s *KubeOpsSuite) TestKubeOpsCreateWaitDelete(c *C) { waitDeployPhase(s.namespace, deployName), deletePhase(gvr, deployName, s.namespace)) phases, err := kanister.GetPhases(bp, action, kanister.DefaultVersion, tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { out, err := p.Exec(ctx, bp, action, tp) - c.Assert(err, IsNil, Commentf("Phase %s failed", p.Name())) + c.Assert(err, check.IsNil, check.Commentf("Phase %s failed", p.Name())) _, err = 
s.dynCli.Resource(gvr).Namespace(s.namespace).Get(context.TODO(), deployName, metav1.GetOptions{}) if p.Name() == "deleteDeploy" { - c.Assert(err, NotNil) - c.Assert(apierrors.IsNotFound(err), Equals, true) + c.Assert(err, check.NotNil) + c.Assert(apierrors.IsNotFound(err), check.Equals, true) } else { - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } if p.Name() == "waitDeployReady" { @@ -311,7 +311,7 @@ func (s *KubeOpsSuite) TestKubeOpsCreateWaitDelete(c *C) { "name": deployName, "namespace": s.namespace, } - c.Assert(out, DeepEquals, expOut) + c.Assert(out, check.DeepEquals, expOut) } } diff --git a/pkg/function/prepare_data_test.go b/pkg/function/prepare_data_test.go index 8392efdeb2..6502352e21 100644 --- a/pkg/function/prepare_data_test.go +++ b/pkg/function/prepare_data_test.go @@ -18,7 +18,7 @@ import ( "context" "fmt" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -30,7 +30,7 @@ import ( "github.com/kanisterio/kanister/pkg/testutil" ) -var _ = Suite(&PrepareDataSuite{}) +var _ = check.Suite(&PrepareDataSuite{}) const ( deployment = "Deployment" @@ -42,9 +42,9 @@ type PrepareDataSuite struct { namespace string } -func (s *PrepareDataSuite) SetUpSuite(c *C) { +func (s *PrepareDataSuite) SetUpSuite(c *check.C) { cli, err := kube.NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli ns := &corev1.Namespace{ @@ -53,11 +53,11 @@ func (s *PrepareDataSuite) SetUpSuite(c *C) { }, } cns, err := s.cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name } -func (s *PrepareDataSuite) TearDownSuite(c *C) { +func (s *PrepareDataSuite) TearDownSuite(c *check.C) { if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } @@ -114,10 +114,10 @@ func newPrepareDataBlueprint(kind, pvc string) *crv1alpha1.Blueprint { } } -func (s *PrepareDataSuite) TestPrepareData(c *C) { +func (s *PrepareDataSuite) TestPrepareData(c *check.C) { pvc := testutil.NewTestPVC() createdPVC, err := s.cli.CoreV1().PersistentVolumeClaims(s.namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctx := context.Background() for _, kind := range []string{deployment, statefulset} { @@ -145,10 +145,10 @@ func (s *PrepareDataSuite) TestPrepareData(c *C) { action := "test" bp := newPrepareDataBlueprint(kind, createdPVC.Name) phases, err := kanister.GetPhases(*bp, action, kanister.DefaultVersion, tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { _, err = p.Exec(ctx, *bp, action, tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } } diff --git a/pkg/function/rds_functions_test.go b/pkg/function/rds_functions_test.go index b6a78c22df..fc037e6749 100644 --- a/pkg/function/rds_functions_test.go +++ b/pkg/function/rds_functions_test.go @@ -19,7 +19,7 @@ import ( "fmt" "strings" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/param" "github.com/kanisterio/kanister/pkg/postgres" @@ -27,9 +27,9 @@ import ( type RDSFunctionsTest struct{} -var _ = Suite(&RDSFunctionsTest{}) +var _ = check.Suite(&RDSFunctionsTest{}) -func (s *RDSFunctionsTest) TestPrepareCommand(c *C) { +func (s *RDSFunctionsTest) TestPrepareCommand(c *check.C) { testCases := []struct { name string dbEngine RDSDBEngine @@ -41,7 +41,7 @@ func (s *RDSFunctionsTest) TestPrepareCommand(c *C) { backupPrefix string backupID string dbEngineVersion string - errChecker Checker + errChecker check.Checker tp param.TemplateParams command []string }{ @@ -55,7 +55,7 @@ func (s *RDSFunctionsTest) TestPrepareCommand(c *C) { backupPrefix: "/backup/postgres-backup", backupID: "backup-id", dbEngineVersion: "12.7", - errChecker: IsNil, + errChecker: check.IsNil, dbList: []string{"template1"}, command: []string{"bash", "-o", "errexit", "-o", "pipefail", "-c", fmt.Sprintf(` @@ -74,7 +74,7 @@ func (s *RDSFunctionsTest) TestPrepareCommand(c *C) { backupPrefix: "/backup/postgres-backup", backupID: "backup-id", dbEngineVersion: "13.3", - errChecker: IsNil, + errChecker: check.IsNil, dbList: []string{"template1"}, command: []string{"bash", "-o", "errexit", "-o", "pipefail", "-c", fmt.Sprintf(` @@ -93,7 +93,7 @@ func (s *RDSFunctionsTest) TestPrepareCommand(c *C) { backupPrefix: "/backup/postgres-backup", backupID: "backup-id", dbEngineVersion: "12.7", - errChecker: IsNil, + errChecker: check.IsNil, dbList: []string{"template1"}, command: []string{"bash", "-o", "errexit", "-o", "pipefail", "-c", fmt.Sprintf(` @@ -121,7 +121,7 @@ func (s *RDSFunctionsTest) TestPrepareCommand(c *C) { backupPrefix: "/backup/postgres-backup", backupID: "backup-id", dbEngineVersion: "12.7", - errChecker: NotNil, + errChecker: check.NotNil, dbList: []string{"template1"}, command: nil, }, @@ -130,7 +130,7 @@ func (s *RDSFunctionsTest) TestPrepareCommand(c *C) { for _, tc := range testCases { outCommand, err := prepareCommand(context.Background(), tc.dbEngine, tc.action, tc.dbEndpoint, tc.username, tc.password, tc.dbList, tc.backupPrefix, tc.backupID, tc.tp.Profile, tc.dbEngineVersion) - c.Check(err, tc.errChecker, Commentf("Case %s failed", tc.name)) - c.Assert(outCommand, DeepEquals, tc.command) + c.Check(err, tc.errChecker, check.Commentf("Case %s failed", tc.name)) + c.Assert(outCommand, check.DeepEquals, tc.command) } } diff --git a/pkg/function/restore_csi_snapshot_test.go b/pkg/function/restore_csi_snapshot_test.go index 9a2ddb590b..6ed1104e32 100644 --- a/pkg/function/restore_csi_snapshot_test.go +++ b/pkg/function/restore_csi_snapshot_test.go @@ -17,7 +17,7 @@ package function import ( "context" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -46,9 +46,9 @@ type RestoreCSISnapshotTestSuite struct { storageClass string } -var _ = Suite(&RestoreCSISnapshotTestSuite{}) +var _ = check.Suite(&RestoreCSISnapshotTestSuite{}) -func (testSuite *RestoreCSISnapshotTestSuite) SetUpSuite(c *C) { +func (testSuite *RestoreCSISnapshotTestSuite) SetUpSuite(c *check.C) { testSuite.volumeSnapshotClass = snapshotClass testSuite.storageClass = storageClass testSuite.pvcName = originalPVCName @@ -57,7 +57,7 @@ func (testSuite *RestoreCSISnapshotTestSuite) SetUpSuite(c *C) { testSuite.namespace = testRestoreNamespace } -func (testSuite *RestoreCSISnapshotTestSuite) TestRestoreCSISnapshot(c *C) { +func (testSuite *RestoreCSISnapshotTestSuite) TestRestoreCSISnapshot(c *check.C) { for _, apiResourceList := range []*metav1.APIResourceList{ { TypeMeta: metav1.TypeMeta{ @@ -86,11 +86,11 @@ func (testSuite *RestoreCSISnapshotTestSuite) TestRestoreCSISnapshot(c *C) { fakeCli.Resources = []*metav1.APIResourceList{apiResourceList} _, err := fakeCli.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testSuite.namespace}}, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) scheme := runtime.NewScheme() fakeSnapshotter, err := snapshot.NewSnapshotter(fakeCli, dynfake.NewSimpleDynamicClient(scheme)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) originalPVC := getOriginalPVCManifest(testSuite.pvcName, testSuite.storageClass) createPVC(c, testSuite.namespace, originalPVC, fakeCli) @@ -99,11 +99,11 @@ func (testSuite *RestoreCSISnapshotTestSuite) TestRestoreCSISnapshot(c *C) { Namespace: testSuite.namespace, } err = fakeSnapshotter.Create(ctx, testSuite.pvcName, &testSuite.volumeSnapshotClass, false, fakeSnapshotMeta) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) vs, err := fakeSnapshotter.Get(ctx, testSuite.snapName, testSuite.namespace) - c.Assert(err, IsNil) - c.Assert(vs.Name, Equals, testSuite.snapName) + c.Assert(err, check.IsNil) + c.Assert(vs.Name, check.Equals, testSuite.snapName) restoreArgs := restoreCSISnapshotArgs{ Name: testSuite.snapName, @@ -116,26 +116,26 @@ func (testSuite *RestoreCSISnapshotTestSuite) TestRestoreCSISnapshot(c *C) { Labels: nil, } pvc, err := restoreCSISnapshot(ctx, fakeCli, restoreArgs) - c.Assert(err, IsNil) - c.Assert(pvc.Name, Equals, testSuite.newPVCName) + c.Assert(err, check.IsNil) + c.Assert(pvc.Name, check.Equals, testSuite.newPVCName) err = fakeCli.CoreV1().Namespaces().Delete(ctx, testSuite.namespace, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } -func (testSuite *RestoreCSISnapshotTestSuite) TestValidateVolumeModeArg(c *C) { +func (testSuite *RestoreCSISnapshotTestSuite) TestValidateVolumeModeArg(c *check.C) { for _, scenario := range []struct { Arg corev1.PersistentVolumeMode - ExpectedErr Checker + ExpectedErr check.Checker }{ { Arg: "test", - ExpectedErr: NotNil, + ExpectedErr: check.NotNil, }, { Arg: corev1.PersistentVolumeFilesystem, - ExpectedErr: IsNil, + ExpectedErr: check.IsNil, }, } { err := validateVolumeModeArg(scenario.Arg) @@ -143,18 +143,18 @@ func (testSuite *RestoreCSISnapshotTestSuite) TestValidateVolumeModeArg(c *C) { } } -func (testSuite *RestoreCSISnapshotTestSuite) TestValidateAccessModeArg(c *C) { +func (testSuite *RestoreCSISnapshotTestSuite) TestValidateAccessModeArg(c *check.C) { for _, scenario := range []struct { Arg 
[]corev1.PersistentVolumeAccessMode - ExpectedErr Checker + ExpectedErr check.Checker }{ { Arg: []corev1.PersistentVolumeAccessMode{"test"}, - ExpectedErr: NotNil, + ExpectedErr: check.NotNil, }, { Arg: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - ExpectedErr: IsNil, + ExpectedErr: check.IsNil, }, } { err := validateVolumeAccessModesArg(scenario.Arg) @@ -162,9 +162,9 @@ func (testSuite *RestoreCSISnapshotTestSuite) TestValidateAccessModeArg(c *C) { } } -func createPVC(c *C, namespace string, pvc *corev1.PersistentVolumeClaim, fakeCli *fake.Clientset) { +func createPVC(c *check.C, namespace string, pvc *corev1.PersistentVolumeClaim, fakeCli *fake.Clientset) { _, err := fakeCli.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } func getOriginalPVCManifest(pvcName, storageClassName string) *corev1.PersistentVolumeClaim { diff --git a/pkg/function/restore_data_test.go b/pkg/function/restore_data_test.go index d438db10d7..e74ed9d835 100644 --- a/pkg/function/restore_data_test.go +++ b/pkg/function/restore_data_test.go @@ -15,7 +15,7 @@ package function import ( - . "gopkg.in/check.v1" + "gopkg.in/check.v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" "github.com/kanisterio/kanister/pkg/param" @@ -23,13 +23,13 @@ import ( type RestoreDataTestSuite struct{} -var _ = Suite(&RestoreDataTestSuite{}) +var _ = check.Suite(&RestoreDataTestSuite{}) -func (s *RestoreDataTestSuite) TestValidateAndGetOptArgs(c *C) { +func (s *RestoreDataTestSuite) TestValidateAndGetOptArgs(c *check.C) { testCases := []struct { name string args map[string]interface{} - errChecker Checker + errChecker check.Checker tp param.TemplateParams }{ { @@ -38,7 +38,7 @@ func (s *RestoreDataTestSuite) TestValidateAndGetOptArgs(c *C) { RestoreDataPodArg: "some-pod", RestoreDataBackupTagArg: "backup123", }, - errChecker: IsNil, + errChecker: check.IsNil, }, { name: "Args with Vols", @@ -46,7 +46,7 @@ func (s *RestoreDataTestSuite) TestValidateAndGetOptArgs(c *C) { RestoreDataVolsArg: map[string]string{"pvc": "mount"}, RestoreDataBackupTagArg: "backup123", }, - errChecker: IsNil, + errChecker: check.IsNil, }, { name: "Args with Pod and Vols", @@ -55,12 +55,12 @@ func (s *RestoreDataTestSuite) TestValidateAndGetOptArgs(c *C) { RestoreDataVolsArg: map[string]string{"pvc": "mount"}, RestoreDataBackupTagArg: "backup123", }, - errChecker: NotNil, + errChecker: check.NotNil, }, { name: "Empty Args", args: map[string]interface{}{}, - errChecker: NotNil, + errChecker: check.NotNil, }, { name: "Args with backupTag", @@ -68,7 +68,7 @@ func (s *RestoreDataTestSuite) TestValidateAndGetOptArgs(c *C) { RestoreDataPodArg: "some-pod", RestoreDataBackupTagArg: "backup123", }, - errChecker: IsNil, + errChecker: check.IsNil, }, { name: "Args with ID", @@ -76,7 +76,7 @@ func (s *RestoreDataTestSuite) TestValidateAndGetOptArgs(c *C) { RestoreDataPodArg: "some-pod", RestoreDataBackupIdentifierArg: "backup123", }, - errChecker: IsNil, + errChecker: check.IsNil, }, { name: "Args with backupTag and ID", @@ -85,7 +85,7 @@ func (s *RestoreDataTestSuite) TestValidateAndGetOptArgs(c *C) { RestoreDataBackupTagArg: "backup123", RestoreDataBackupIdentifierArg: "backup123", }, - errChecker: NotNil, + errChecker: check.NotNil, }, { name: "Args with podOverride", @@ -101,7 +101,7 @@ func (s *RestoreDataTestSuite) TestValidateAndGetOptArgs(c *C) { }, }, }, - errChecker: IsNil, + errChecker: check.IsNil, tp: param.TemplateParams{ PodOverride: 
crv1alpha1.JSONMap{ "dnsPolicy": "ClusterFirst", @@ -111,6 +111,6 @@ func (s *RestoreDataTestSuite) TestValidateAndGetOptArgs(c *C) { } for _, tc := range testCases { _, _, _, _, _, _, _, _, err := validateAndGetOptArgs(tc.args, tc.tp) - c.Check(err, tc.errChecker, Commentf("Case %s failed", tc.name)) + c.Check(err, tc.errChecker, check.Commentf("Case %s failed", tc.name)) } } diff --git a/pkg/function/scale_test.go b/pkg/function/scale_test.go index 8278c80cd2..ae05c086f4 100644 --- a/pkg/function/scale_test.go +++ b/pkg/function/scale_test.go @@ -19,7 +19,7 @@ import ( "fmt" osversioned "github.com/openshift/client-go/apps/clientset/versioned" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/dynamic/fake" @@ -42,24 +42,24 @@ type ScaleSuite struct { namespace string } -var _ = Suite(&ScaleSuite{}) +var _ = check.Suite(&ScaleSuite{}) -func (s *ScaleSuite) SetUpTest(c *C) { +func (s *ScaleSuite) SetUpTest(c *check.C) { config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := kubernetes.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) crCli, err := versioned.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) osCli, err := osversioned.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli s.crCli = crCli s.osCli = osCli ctx := context.Background() err = resource.CreateCustomResources(context.Background(), config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -67,19 +67,19 @@ func (s *ScaleSuite) SetUpTest(c *C) { }, } cns, err := s.cli.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name sec := testutil.NewTestProfileSecret() sec, err = s.cli.CoreV1().Secrets(s.namespace).Create(ctx, sec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p := testutil.NewTestProfile(s.namespace, sec.GetName()) _, err = crCli.CrV1alpha1().Profiles(s.namespace).Create(ctx, p, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *ScaleSuite) TearDownTest(c *C) { +func (s *ScaleSuite) TearDownTest(c *check.C) { if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } @@ -131,7 +131,7 @@ func newScaleBlueprint(kind string, scaleUpCount string) *crv1alpha1.Blueprint { } } -func (s *ScaleSuite) TestScaleDeployment(c *C) { +func (s *ScaleSuite) TestScaleDeployment(c *check.C) { ctx := context.Background() var originalReplicaCount int32 = 1 d := testutil.NewTestDeployment(originalReplicaCount) @@ -144,10 +144,10 @@ func (s *ScaleSuite) TestScaleDeployment(c *C) { } d, err := s.cli.AppsV1().Deployments(s.namespace).Create(ctx, d, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnDeploymentReady(ctx, s.cli, d.GetNamespace(), d.GetName()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) kind := "Deployment" as := crv1alpha1.ActionSpec{ @@ -164,36 +164,36 @@ func (s *ScaleSuite) TestScaleDeployment(c *C) { var scaleUpToReplicas int32 = 2 for _, action := range []string{"scaleUp", "echoHello", "scaleDown"} { tp, err := param.New(ctx, s.cli, fake.NewSimpleDynamicClient(k8sscheme.Scheme, d), s.crCli, s.osCli, as) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) bp := newScaleBlueprint(kind, fmt.Sprintf("%d", 
scaleUpToReplicas)) phases, err := kanister.GetPhases(*bp, action, kanister.DefaultVersion, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { out, err := p.Exec(context.Background(), *bp, action, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // at the start workload has `originalReplicaCount` replicas, the first phase that is going to get executed is // `scaleUp` which would change that count to 2, but the function would return the count that workload originally had // i.e., `originalReplicaCount` if action == "scaleUp" { - c.Assert(out[outputArtifactOriginalReplicaCount], Equals, originalReplicaCount) + c.Assert(out[outputArtifactOriginalReplicaCount], check.Equals, originalReplicaCount) } // `scaleDown` is going to change the replica count to 0 from 2. Because the workload already had 2 replicas // (previous phase), so ouptut artifact from the function this time would be what the workload already had i.e., 2 if action == "scaleDown" { - c.Assert(out[outputArtifactOriginalReplicaCount], Equals, scaleUpToReplicas) + c.Assert(out[outputArtifactOriginalReplicaCount], check.Equals, scaleUpToReplicas) } } ok, _, err := kube.DeploymentReady(ctx, s.cli, d.GetNamespace(), d.GetName()) - c.Assert(err, IsNil) - c.Assert(ok, Equals, true) + c.Assert(err, check.IsNil) + c.Assert(ok, check.Equals, true) } pods, err := s.cli.CoreV1().Pods(s.namespace).List(ctx, metav1.ListOptions{}) - c.Assert(err, IsNil) - c.Assert(pods.Items, HasLen, 0) + c.Assert(err, check.IsNil) + c.Assert(pods.Items, check.HasLen, 0) } -func (s *ScaleSuite) TestScaleStatefulSet(c *C) { +func (s *ScaleSuite) TestScaleStatefulSet(c *check.C) { ctx := context.Background() var originalReplicaCount int32 = 1 ss := testutil.NewTestStatefulSet(originalReplicaCount) @@ -205,10 +205,10 @@ func (s *ScaleSuite) TestScaleStatefulSet(c *C) { }, } ss, err := s.cli.AppsV1().StatefulSets(s.namespace).Create(ctx, ss, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnStatefulSetReady(ctx, s.cli, ss.GetNamespace(), ss.GetName()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) kind := "StatefulSet" as := crv1alpha1.ActionSpec{ @@ -226,35 +226,35 @@ func (s *ScaleSuite) TestScaleStatefulSet(c *C) { var scaleUpToReplicas int32 = 2 for _, action := range []string{"scaleUp", "echoHello", "scaleDown"} { tp, err := param.New(ctx, s.cli, fake.NewSimpleDynamicClient(k8sscheme.Scheme, ss), s.crCli, s.osCli, as) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) bp := newScaleBlueprint(kind, fmt.Sprintf("%d", scaleUpToReplicas)) phases, err := kanister.GetPhases(*bp, action, kanister.DefaultVersion, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { out, err := p.Exec(context.Background(), *bp, action, *tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // at the start workload has `originalReplicaCount` replicas, the first phase that is going to get executed is // `scaleUp` which would change that count to 2, but the function would return the count that workload originally had // i.e., `originalReplicaCount` if action == "scaleUp" { - c.Assert(out[outputArtifactOriginalReplicaCount], Equals, originalReplicaCount) + c.Assert(out[outputArtifactOriginalReplicaCount], check.Equals, originalReplicaCount) } // `scaleDown` is going to change the replica count to 0 from 2. 
Because the workload already had 2 replicas // (previous phase), so ouptut artifact from the function this time would be what the workload already had i.e., 2 if action == "scaleDown" { - c.Assert(out[outputArtifactOriginalReplicaCount], Equals, scaleUpToReplicas) + c.Assert(out[outputArtifactOriginalReplicaCount], check.Equals, scaleUpToReplicas) } } ok, _, err := kube.StatefulSetReady(ctx, s.cli, ss.GetNamespace(), ss.GetName()) - c.Assert(err, IsNil) - c.Assert(ok, Equals, true) + c.Assert(err, check.IsNil) + c.Assert(ok, check.Equals, true) } _, err = s.cli.CoreV1().Pods(s.namespace).List(ctx, metav1.ListOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *ScaleSuite) TestGetArgs(c *C) { +func (s *ScaleSuite) TestGetArgs(c *check.C) { for _, tc := range []struct { tp param.TemplateParams args map[string]interface{} @@ -263,12 +263,12 @@ func (s *ScaleSuite) TestGetArgs(c *C) { wantName string wantReplicas int32 wantWaitForReady bool - check Checker + check check.Checker }{ { tp: param.TemplateParams{}, args: map[string]interface{}{ScaleWorkloadReplicas: 2}, - check: NotNil, + check: check.NotNil, }, { tp: param.TemplateParams{}, @@ -284,7 +284,7 @@ func (s *ScaleSuite) TestGetArgs(c *C) { wantNamespace: "foo", wantReplicas: int32(2), wantWaitForReady: false, - check: IsNil, + check: check.IsNil, }, { tp: param.TemplateParams{ @@ -301,7 +301,7 @@ func (s *ScaleSuite) TestGetArgs(c *C) { wantNamespace: "foo", wantReplicas: int32(2), wantWaitForReady: true, - check: IsNil, + check: check.IsNil, }, { tp: param.TemplateParams{ @@ -318,7 +318,7 @@ func (s *ScaleSuite) TestGetArgs(c *C) { wantNamespace: "foo", wantReplicas: int32(2), wantWaitForReady: true, - check: IsNil, + check: check.IsNil, }, { tp: param.TemplateParams{ @@ -338,7 +338,7 @@ func (s *ScaleSuite) TestGetArgs(c *C) { wantNamespace: "notfoo", wantReplicas: int32(2), wantWaitForReady: true, - check: IsNil, + check: check.IsNil, }, } { s := scaleWorkloadFunc{} @@ -347,10 +347,10 @@ func (s *ScaleSuite) TestGetArgs(c *C) { if err != nil { continue } - c.Assert(s.namespace, Equals, tc.wantNamespace) - c.Assert(s.name, Equals, tc.wantName) - c.Assert(s.kind, Equals, tc.wantKind) - c.Assert(s.replicas, Equals, tc.wantReplicas) - c.Assert(s.waitForReady, Equals, tc.wantWaitForReady) + c.Assert(s.namespace, check.Equals, tc.wantNamespace) + c.Assert(s.name, check.Equals, tc.wantName) + c.Assert(s.kind, check.Equals, tc.wantKind) + c.Assert(s.replicas, check.Equals, tc.wantReplicas) + c.Assert(s.waitForReady, check.Equals, tc.wantWaitForReady) } } diff --git a/pkg/function/utils_test.go b/pkg/function/utils_test.go index ad4539fb1d..98a55ef008 100644 --- a/pkg/function/utils_test.go +++ b/pkg/function/utils_test.go @@ -15,7 +15,7 @@ package function import ( - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -26,49 +26,49 @@ import ( type UtilsTestSuite struct { } -var _ = Suite(&UtilsTestSuite{}) +var _ = check.Suite(&UtilsTestSuite{}) -func (s *UtilsTestSuite) TestValidateProfile(c *C) { +func (s *UtilsTestSuite) TestValidateProfile(c *check.C) { testCases := []struct { name string profile *param.Profile - errChecker Checker + errChecker check.Checker }{ - {"Valid Profile", newValidProfile(), IsNil}, - {"Valid Profile with Secret Credentials", newValidProfileWithSecretCredentials(), IsNil}, - {"Invalid Profile", newInvalidProfile(), NotNil}, - {"Invalid Profile with Secret Credentials", newInvalidProfileWithSecretCredentials(), NotNil}, - {"Nil Profile", nil, NotNil}, + {"Valid Profile", newValidProfile(), check.IsNil}, + {"Valid Profile with Secret Credentials", newValidProfileWithSecretCredentials(), check.IsNil}, + {"Invalid Profile", newInvalidProfile(), check.NotNil}, + {"Invalid Profile with Secret Credentials", newInvalidProfileWithSecretCredentials(), check.NotNil}, + {"Nil Profile", nil, check.NotNil}, } for _, tc := range testCases { err := ValidateProfile(tc.profile) - c.Check(err, tc.errChecker, Commentf("Test %s Failed", tc.name)) + c.Check(err, tc.errChecker, check.Commentf("Test %s Failed", tc.name)) } } -func (s *UtilsTestSuite) TestFetchPodVolumes(c *C) { +func (s *UtilsTestSuite) TestFetchPodVolumes(c *check.C) { testCases := []struct { name string tp param.TemplateParams pod string vols map[string]string - errChecker Checker + errChecker check.Checker }{ - {"Valid Deployment Pod", newValidDeploymentTP(), "pod1", map[string]string{"pvc1": "path1"}, IsNil}, - {"Valid StatefulSet Pod", newValidStatefulSetTP(), "pod2", map[string]string{"pvc2": "path2", "pvc3": "path3"}, IsNil}, - {"Invalid Deployment Pod", newValidDeploymentTP(), "pod3", nil, NotNil}, - {"Invalid StatefulSet Pod", newValidStatefulSetTP(), "pod4", nil, NotNil}, - {"Deployment Pod with no volumes", newInvalidDeploymentTP(), "pod2", nil, NotNil}, - {"Invalid Template Params", param.TemplateParams{}, "pod1", nil, NotNil}, + {"Valid Deployment Pod", newValidDeploymentTP(), "pod1", map[string]string{"pvc1": "path1"}, check.IsNil}, + {"Valid StatefulSet Pod", newValidStatefulSetTP(), "pod2", map[string]string{"pvc2": "path2", "pvc3": "path3"}, check.IsNil}, + {"Invalid Deployment Pod", newValidDeploymentTP(), "pod3", nil, check.NotNil}, + {"Invalid StatefulSet Pod", newValidStatefulSetTP(), "pod4", nil, check.NotNil}, + {"Deployment Pod with no volumes", newInvalidDeploymentTP(), "pod2", nil, check.NotNil}, + {"Invalid Template Params", param.TemplateParams{}, "pod1", nil, check.NotNil}, } for _, tc := range testCases { vols, err := FetchPodVolumes(tc.pod, tc.tp) - c.Check(err, tc.errChecker, Commentf("Test: %s Failed!", tc.name)) - c.Check(vols, DeepEquals, tc.vols, Commentf("Test: %s Failed!", tc.name)) + c.Check(err, tc.errChecker, check.Commentf("Test: %s Failed!", tc.name)) + c.Check(vols, check.DeepEquals, tc.vols, check.Commentf("Test: %s Failed!", tc.name)) } } -func (s *UtilsTestSuite) TestResolveArtifactPrefix(c *C) { +func (s *UtilsTestSuite) TestResolveArtifactPrefix(c *check.C) { for _, tc := range []struct { prefix string expected string @@ -99,11 +99,11 @@ func (s *UtilsTestSuite) TestResolveArtifactPrefix(c *C) { }, } { res := ResolveArtifactPrefix(tc.prefix, newValidProfile()) - c.Check(res, Equals, tc.expected) + c.Check(res, check.Equals, tc.expected) } } -func 
(s *UtilsTestSuite) TestMergeBPAnnotations(c *C) { +func (s *UtilsTestSuite) TestMergeBPAnnotations(c *check.C) { for _, tc := range []struct { actionSetAnnotations map[string]string bpAnnotations map[string]string @@ -197,11 +197,11 @@ func (s *UtilsTestSuite) TestMergeBPAnnotations(c *C) { } { var asAnnotations ActionSetAnnotations = tc.actionSetAnnotations anotations := asAnnotations.MergeBPAnnotations(tc.bpAnnotations) - c.Assert(anotations, DeepEquals, tc.expectedAnnotations) + c.Assert(anotations, check.DeepEquals, tc.expectedAnnotations) } } -func (s *UtilsTestSuite) TestMergeBPLabels(c *C) { +func (s *UtilsTestSuite) TestMergeBPLabels(c *check.C) { for _, tc := range []struct { actionSetLabels map[string]string bpLabels map[string]string @@ -291,7 +291,7 @@ func (s *UtilsTestSuite) TestMergeBPLabels(c *C) { } { var actionSetLabels ActionSetLabels = tc.actionSetLabels labels := actionSetLabels.MergeBPLabels(tc.bpLabels) - c.Assert(labels, DeepEquals, tc.expectedLabels) + c.Assert(labels, check.DeepEquals, tc.expectedLabels) } } diff --git a/pkg/function/wait_for_snapshot_completion_test.go b/pkg/function/wait_for_snapshot_completion_test.go index b2a9c9eee1..a7595cfa72 100644 --- a/pkg/function/wait_for_snapshot_completion_test.go +++ b/pkg/function/wait_for_snapshot_completion_test.go @@ -18,7 +18,7 @@ import ( "context" "encoding/json" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -30,9 +30,9 @@ import ( type WaitForSnapshotCompletionTestSuite struct{} -var _ = Suite(&WaitForSnapshotCompletionTestSuite{}) +var _ = check.Suite(&WaitForSnapshotCompletionTestSuite{}) -func (s *WaitForSnapshotCompletionTestSuite) TestWaitwithRole(c *C) { +func (s *WaitForSnapshotCompletionTestSuite) TestWaitwithRole(c *check.C) { ctx := context.Background() mockGetter := mockblockstorage.NewGetter() pvcData1 := []VolumeSnapshotInfo{ @@ -40,16 +40,16 @@ func (s *WaitForSnapshotCompletionTestSuite) TestWaitwithRole(c *C) { {SnapshotID: "snap-2", Type: blockstorage.TypeEBS, Region: "us-west-2", PVCName: "pvc-2", Az: "us-west-2a", VolumeType: "ssd"}, } info, err := json.Marshal(pvcData1) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) snapinfo := string(info) for _, tc := range []struct { snapshotinfo string - check Checker + check check.Checker profile *param.Profile }{ { snapshotinfo: snapinfo, - check: IsNil, + check: check.IsNil, profile: ¶m.Profile{ Location: crv1alpha1.Location{ Type: crv1alpha1.LocationTypeS3Compliant, @@ -66,7 +66,7 @@ func (s *WaitForSnapshotCompletionTestSuite) TestWaitwithRole(c *C) { }, { snapshotinfo: snapinfo, - check: IsNil, + check: check.IsNil, profile: ¶m.Profile{ Location: crv1alpha1.Location{ Type: crv1alpha1.LocationTypeS3Compliant, diff --git a/pkg/function/wait_test.go b/pkg/function/wait_test.go index 417fbc6945..e47d3c6497 100644 --- a/pkg/function/wait_test.go +++ b/pkg/function/wait_test.go @@ -18,7 +18,7 @@ import ( "context" "time" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -30,7 +30,7 @@ import ( "github.com/kanisterio/kanister/pkg/testutil" ) -var _ = Suite(&WaitSuite{}) +var _ = check.Suite(&WaitSuite{}) type WaitSuite struct { cli kubernetes.Interface @@ -39,9 +39,9 @@ type WaitSuite struct { statefulset string } -func (s *WaitSuite) SetUpSuite(c *C) { +func (s *WaitSuite) SetUpSuite(c *check.C) { cli, err := kube.NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli ns := &corev1.Namespace{ @@ -50,17 +50,17 @@ func (s *WaitSuite) SetUpSuite(c *C) { }, } cns, err := s.cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) d, err := s.cli.AppsV1().Deployments(cns.Name).Create(context.TODO(), testutil.NewTestDeployment(int32(1)), metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) sts, err := s.cli.AppsV1().StatefulSets(cns.Name).Create(context.TODO(), testutil.NewTestStatefulSet(int32(1)), metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name s.deploy = d.Name s.statefulset = sts.Name } -func (s *WaitSuite) TearDownSuite(c *C) { +func (s *WaitSuite) TearDownSuite(c *check.C) { if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } @@ -208,34 +208,34 @@ func newWaitBlueprint(phases ...crv1alpha1.BlueprintPhase) *crv1alpha1.Blueprint } } -func (s *WaitSuite) TestWait(c *C) { +func (s *WaitSuite) TestWait(c *check.C) { tp := param.TemplateParams{ Time: time.Now().String(), } action := "test" for _, tc := range []struct { bp *crv1alpha1.Blueprint - checker Checker + checker check.Checker }{ { bp: newWaitBlueprint(waitDeployPhase(s.namespace, s.deploy)), - checker: IsNil, + checker: check.IsNil, }, { bp: newWaitBlueprint(waitStatefulSetPhase(s.namespace, s.statefulset)), - checker: IsNil, + checker: check.IsNil, }, { bp: newWaitBlueprint(waitNsPhase(s.namespace)), - checker: IsNil, + checker: check.IsNil, }, { bp: newWaitBlueprint(waitNsTimeoutPhase(s.namespace)), - checker: NotNil, + checker: check.NotNil, }, } { phases, err := kanister.GetPhases(*tc.bp, action, kanister.DefaultVersion, tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { _, err := p.Exec(context.TODO(), *tc.bp, action, tp) c.Assert(err, tc.checker) diff --git a/pkg/function/waitv2_test.go b/pkg/function/waitv2_test.go index 1800a2538e..b5f005dec7 100644 --- a/pkg/function/waitv2_test.go +++ b/pkg/function/waitv2_test.go @@ -18,7 +18,7 @@ import ( "context" "time" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -30,7 +30,7 @@ import ( "github.com/kanisterio/kanister/pkg/testutil" ) -var _ = Suite(&WaitV2Suite{}) +var _ = check.Suite(&WaitV2Suite{}) type WaitV2Suite struct { cli kubernetes.Interface @@ -39,9 +39,9 @@ type WaitV2Suite struct { statefulset string } -func (s *WaitV2Suite) SetUpSuite(c *C) { +func (s *WaitV2Suite) SetUpSuite(c *check.C) { cli, err := kube.NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli ns := &corev1.Namespace{ @@ -50,17 +50,17 @@ func (s *WaitV2Suite) SetUpSuite(c *C) { }, } cns, err := s.cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) d, err := s.cli.AppsV1().Deployments(cns.Name).Create(context.TODO(), testutil.NewTestDeployment(int32(1)), metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) sts, err := s.cli.AppsV1().StatefulSets(cns.Name).Create(context.TODO(), testutil.NewTestStatefulSet(int32(1)), metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name s.deploy = d.Name s.statefulset = sts.Name } -func (s *WaitV2Suite) TearDownSuite(c *C) { +func (s *WaitV2Suite) TearDownSuite(c *check.C) { if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } @@ -194,34 +194,34 @@ func newWaitV2Blueprint(phases ...crv1alpha1.BlueprintPhase) *crv1alpha1.Bluepri } } -func (s *WaitV2Suite) TestWaitV2(c *C) { +func (s *WaitV2Suite) TestWaitV2(c *check.C) { tp := param.TemplateParams{ Time: time.Now().String(), } action := "test" for _, tc := range []struct { bp *crv1alpha1.Blueprint - checker Checker + checker check.Checker }{ { bp: newWaitV2Blueprint(waitV2DeployPhase(s.namespace, s.deploy)), - checker: IsNil, + checker: check.IsNil, }, { bp: newWaitV2Blueprint(waitV2StatefulSetPhase(s.namespace, s.statefulset)), - checker: IsNil, + checker: check.IsNil, }, { bp: newWaitV2Blueprint(waitV2NsPhase(s.namespace)), - checker: IsNil, + checker: check.IsNil, }, { bp: newWaitV2Blueprint(waitV2NsTimeoutPhase(s.namespace)), - checker: NotNil, + checker: check.NotNil, }, } { phases, err := kanister.GetPhases(*tc.bp, action, kanister.DefaultVersion, tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, p := range phases { _, err := p.Exec(context.TODO(), *tc.bp, action, tp) c.Assert(err, tc.checker) diff --git a/pkg/helm/client_test.go b/pkg/helm/client_test.go index a42a5f5630..10f5119f30 100644 --- a/pkg/helm/client_test.go +++ b/pkg/helm/client_test.go @@ -18,7 +18,7 @@ import ( "context" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) type ExecSuite struct { @@ -29,35 +29,35 @@ type ExecSuite struct { } // Valid command -var _ = Suite(&ExecSuite{ +var _ = check.Suite(&ExecSuite{ command: "echo", args: []string{"success"}, output: "success", }) // Invalid command -var _ = Suite(&ExecSuite{ +var _ = check.Suite(&ExecSuite{ command: "invalid", err: true, }) // Check timeout -var _ = Suite(&ExecSuite{ +var _ = check.Suite(&ExecSuite{ command: "sleep", args: []string{"11m"}, err: true, }) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } -func (s *ExecSuite) TestRunCmdWithTimeout(c *C) { +func (s *ExecSuite) TestRunCmdWithTimeout(c *check.C) { ctx := context.Background() out, err := RunCmdWithTimeout(ctx, s.command, s.args) if s.err { - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) return } - c.Assert(err, IsNil) - c.Assert(out, Equals, s.output) + c.Assert(err, check.IsNil) + c.Assert(out, check.Equals, s.output) } diff --git a/pkg/jsonpath/jsonpath_test.go b/pkg/jsonpath/jsonpath_test.go index 9c83e52256..f6d0f00bd0 100644 --- a/pkg/jsonpath/jsonpath_test.go +++ b/pkg/jsonpath/jsonpath_test.go @@ -17,17 +17,17 @@ package jsonpath import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type JsonpathSuite struct{} -var _ = Suite(&JsonpathSuite{}) +var _ = check.Suite(&JsonpathSuite{}) const deploy = `apiVersion: apps/v1 kind: Deployment @@ -95,41 +95,41 @@ status: updatedReplicas: 3 ` -func runtimeObjFromYAML(c *C, specs string) runtime.Object { +func runtimeObjFromYAML(c *check.C, specs string) runtime.Object { decode := scheme.Codecs.UniversalDeserializer().Decode obj, _, err := decode([]byte(specs), nil, nil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) return obj } -func (js *JsonpathSuite) TestDeploymentReady(c *C) { +func (js *JsonpathSuite) TestDeploymentReady(c *check.C) { obj := runtimeObjFromYAML(c, deploy) replica, err := ResolveJsonpathToString(obj, "{.spec.replicas}") - c.Assert(err, IsNil) - c.Assert(replica, Equals, "3") + c.Assert(err, check.IsNil) + c.Assert(replica, check.Equals, "3") readyReplicas, err := ResolveJsonpathToString(obj, "{.status.replicas}") - c.Assert(err, IsNil) - c.Assert(readyReplicas, Equals, "3") + c.Assert(err, check.IsNil) + c.Assert(readyReplicas, check.Equals, "3") availReplicas, err := ResolveJsonpathToString(obj, "{.status.availableReplicas}") - c.Assert(err, IsNil) - c.Assert(availReplicas, Equals, "3") + c.Assert(err, check.IsNil) + c.Assert(availReplicas, check.Equals, "3") // Any condition with type Available condType, err := ResolveJsonpathToString(obj, `{.status.conditions[?(@.type == "Available")].type}`) - c.Assert(err, IsNil) - c.Assert(condType, Equals, "Available") + c.Assert(err, check.IsNil) + c.Assert(condType, check.Equals, "Available") condStatus, err := ResolveJsonpathToString(obj, `{.status.conditions[?(@.type == "Available")].status}`) - c.Assert(err, IsNil) - c.Assert(condStatus, Equals, "True") + c.Assert(err, check.IsNil) + c.Assert(condStatus, check.Equals, "True") _, err = ResolveJsonpathToString(obj, "{.status.something}") - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (js *JsonpathSuite) TestFindJsonpathArgs(c *C) { +func (js *JsonpathSuite) TestFindJsonpathArgs(c *check.C) { for _, tc := range []struct { arg string expJsonpathArg map[string]string @@ -168,6 +168,6 @@ func (js *JsonpathSuite) TestFindJsonpathArgs(c *C) { }, } { m := FindJsonpathArgs(tc.arg) - c.Assert(m, DeepEquals, tc.expJsonpathArg) + c.Assert(m, check.DeepEquals, tc.expJsonpathArg) } } diff --git a/pkg/kanctl/actionset_test.go b/pkg/kanctl/actionset_test.go index 3213c1b5d6..3e4b3101b3 100644 --- a/pkg/kanctl/actionset_test.go +++ b/pkg/kanctl/actionset_test.go @@ -17,22 +17,22 @@ package kanctl import ( "testing" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" ) type KanctlTestSuite struct{} -var _ = Suite(&KanctlTestSuite{}) +var _ = check.Suite(&KanctlTestSuite{}) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } -func (k *KanctlTestSuite) TestParseGenericObjectReference(c *C) { +func (k *KanctlTestSuite) TestParseGenericObjectReference(c *check.C) { for _, tc := range []struct { objectFlag string expected crv1alpha1.ObjectReference - err Checker + err check.Checker }{ // not core group { @@ -44,7 +44,7 @@ func (k *KanctlTestSuite) TestParseGenericObjectReference(c *C) { Name: "name", Namespace: "namespace", }, - err: IsNil, + err: check.IsNil, }, // core group { @@ -56,7 +56,7 @@ func (k *KanctlTestSuite) TestParseGenericObjectReference(c *C) { Name: "etcd-minikube", Namespace: "kube-system", }, - err: IsNil, + err: check.IsNil, }, // CRs { @@ -68,16 +68,16 @@ func (k *KanctlTestSuite) TestParseGenericObjectReference(c *C) { Name: "s3-profile-5fx9w", Namespace: "kanister", }, - err: IsNil, + err: check.IsNil, }, } { a, err := parseGenericObjectReference(tc.objectFlag) c.Check(err, tc.err) - c.Assert(a, DeepEquals, tc.expected) + c.Assert(a, check.DeepEquals, tc.expected) } } -func (k *KanctlTestSuite) TestGenerateActionSetName(c *C) { +func (k *KanctlTestSuite) TestGenerateActionSetName(c *check.C) { var testCases = []struct { actionName string actionSetName string @@ -101,18 +101,18 @@ func (k *KanctlTestSuite) TestGenerateActionSetName(c *C) { } actual, err := generateActionSetName(params) - c.Assert(err, DeepEquals, tc.expectedErr) + c.Assert(err, check.DeepEquals, tc.expectedErr) if tc.actionSetName != "" || tc.expected == "" { // if --name is provided we just use that we dont derive name - c.Assert(actual, DeepEquals, tc.expected) + c.Assert(actual, check.DeepEquals, tc.expected) } else { // random 5 chars are added at the end if name is derived by us - c.Assert(actual[0:len(actual)-5], DeepEquals, tc.expected) + c.Assert(actual[0:len(actual)-5], check.DeepEquals, tc.expected) } } } -func (k *KanctlTestSuite) TestParseLabels(c *C) { +func (k *KanctlTestSuite) TestParseLabels(c *check.C) { for _, tc := range []struct { flagValue string expectedLabels map[string]string @@ -177,7 +177,7 @@ func (k *KanctlTestSuite) TestParseLabels(c *C) { }, } { op, err := parseLabels(tc.flagValue) - c.Assert(err, DeepEquals, tc.expectedErr) - c.Assert(op, DeepEquals, tc.expectedLabels) + c.Assert(err, check.DeepEquals, tc.expectedErr) + c.Assert(op, check.DeepEquals, tc.expectedLabels) } } diff --git a/pkg/kanister_test.go b/pkg/kanister_test.go index d736939741..a28b6baacd 100644 --- a/pkg/kanister_test.go +++ b/pkg/kanister_test.go @@ -17,8 +17,8 @@ package kanister import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } diff --git a/pkg/kopia/cli/repository/storage/gcs/gcs_opts_test.go b/pkg/kopia/cli/repository/storage/gcs/gcs_opts_test.go index 071d751a44..cec4e21487 100644 --- a/pkg/kopia/cli/repository/storage/gcs/gcs_opts_test.go +++ b/pkg/kopia/cli/repository/storage/gcs/gcs_opts_test.go @@ -17,10 +17,9 @@ package gcs import ( "testing" - "gopkg.in/check.v1" - "github.com/kanisterio/safecli/command" "github.com/kanisterio/safecli/test" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/kopia/cli" ) diff --git a/pkg/kopia/command/blob_test.go b/pkg/kopia/command/blob_test.go index 2658f0a9f3..53f3acd9d0 100644 --- a/pkg/kopia/command/blob_test.go +++ b/pkg/kopia/command/blob_test.go @@ -18,17 +18,17 @@ import ( "strings" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. -func TestKopiaCommandWrappers(t *testing.T) { TestingT(t) } +func TestKopiaCommandWrappers(t *testing.T) { check.TestingT(t) } type KopiaBlobTestSuite struct{} -var _ = Suite(&KopiaBlobTestSuite{}) +var _ = check.Suite(&KopiaBlobTestSuite{}) -func (kBlob *KopiaBlobTestSuite) TestBlobCommands(c *C) { +func (kBlob *KopiaBlobTestSuite) TestBlobCommands(c *check.C) { commandArgs := &CommandArgs{ RepoPassword: "encr-key", ConfigFilePath: "path/kopia.config", @@ -59,6 +59,6 @@ func (kBlob *KopiaBlobTestSuite) TestBlobCommands(c *C) { }, } { cmd := strings.Join(tc.f(), " ") - c.Assert(cmd, Equals, tc.expectedLog) + c.Assert(cmd, check.Equals, tc.expectedLog) } } diff --git a/pkg/kopia/command/maintenance_test.go b/pkg/kopia/command/maintenance_test.go index a146aab60c..0a85c982a0 100644 --- a/pkg/kopia/command/maintenance_test.go +++ b/pkg/kopia/command/maintenance_test.go @@ -17,14 +17,14 @@ package command import ( "strings" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) type KopiaMaintenanceTestSuite struct{} -var _ = Suite(&KopiaMaintenanceTestSuite{}) +var _ = check.Suite(&KopiaMaintenanceTestSuite{}) -func (kMaintenance *KopiaMaintenanceTestSuite) TestMaintenanceCommands(c *C) { +func (kMaintenance *KopiaMaintenanceTestSuite) TestMaintenanceCommands(c *check.C) { commandArgs := &CommandArgs{ RepoPassword: "encr-key", ConfigFilePath: "path/kopia.config", @@ -96,6 +96,6 @@ func (kMaintenance *KopiaMaintenanceTestSuite) TestMaintenanceCommands(c *C) { }, } { cmd := strings.Join(tc.f(), " ") - c.Check(cmd, Equals, tc.expectedLog) + c.Check(cmd, check.Equals, tc.expectedLog) } } diff --git a/pkg/kopia/command/parse_command_output_test.go b/pkg/kopia/command/parse_command_output_test.go index 2455d0a5ef..976d085cec 100644 --- a/pkg/kopia/command/parse_command_output_test.go +++ b/pkg/kopia/command/parse_command_output_test.go @@ -20,54 +20,54 @@ import ( "github.com/kopia/kopia/fs" "github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot/policy" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" ) type KopiaParseUtilsTestSuite struct{} -var _ = Suite(&KopiaParseUtilsTestSuite{}) +var _ = check.Suite(&KopiaParseUtilsTestSuite{}) -func (kParse *KopiaParseUtilsTestSuite) TestSnapshotIDsFromSnapshot(c *C) { +func (kParse *KopiaParseUtilsTestSuite) TestSnapshotIDsFromSnapshot(c *check.C) { for _, tc := range []struct { log string expectedSnapID string expectedRootID string - errChecker Checker + errChecker check.Checker }{ - {"Created snapshot with root k23cf6d7ff418a0110636399da458abb5 and ID beda41fb4ba7478025778fdc8312355c in 10.8362ms", "beda41fb4ba7478025778fdc8312355c", "k23cf6d7ff418a0110636399da458abb5", IsNil}, - {"Created snapshot with root rootID and ID snapID", "snapID", "rootID", IsNil}, - {" Created snapshot snapID (root rootID)", "", "", NotNil}, - {"root 123abcd", "", "", NotNil}, - {"Invalid message", "", "", NotNil}, - {"Created snapshot with root abc123\n in 5.5001ms", "", "", NotNil}, - {"", "", "", NotNil}, - {"Created snapshot", "", "", NotNil}, - {"Created snapshot ", "", "", NotNil}, - {"Created snapshot with root", "", "", NotNil}, - {"Created snapshot with root rootID", "", "", NotNil}, - {"Created snapshot with root rootID and ID\n snapID in 10ms", "", "", NotNil}, - {"Created snapshot with root rootID in 10ms", "", "", NotNil}, - {"Created snapshot and ID snapID in 10ms", "", "", NotNil}, - {"Created snapshot with ID snapID in 10ms", "", "", NotNil}, - {"Created snapshot snapID\n(root rootID) in 10.8362ms", "", "", NotNil}, - {"Created snapshot snapID in 10.8362ms", "", "", NotNil}, - {"Created snapshot (root rootID) in 10.8362ms", "", "", NotNil}, - {"Created snapshot root rootID in 10.8362ms", "", "", NotNil}, - {"Created snapshot root rootID and ID snapID in 10.8362ms", "", "", NotNil}, - {" root rootID and ID snapID in 10.8362ms", "", "", NotNil}, - {"uploaded snapshot beda41fb4ba7478025778fdc8312355c (root k23cf6d7ff418a0110636399da458abb5) in 10.8362ms", "", "", NotNil}, + {"Created snapshot with root k23cf6d7ff418a0110636399da458abb5 and ID beda41fb4ba7478025778fdc8312355c in 10.8362ms", "beda41fb4ba7478025778fdc8312355c", "k23cf6d7ff418a0110636399da458abb5", check.IsNil}, + {"Created snapshot with root rootID and ID snapID", "snapID", "rootID", check.IsNil}, + {" Created snapshot snapID (root rootID)", "", "", check.NotNil}, + {"root 123abcd", "", "", check.NotNil}, + {"Invalid message", "", "", check.NotNil}, + {"Created snapshot with root abc123\n in 5.5001ms", "", "", check.NotNil}, + {"", "", "", check.NotNil}, + {"Created snapshot", "", "", check.NotNil}, + {"Created snapshot ", "", "", check.NotNil}, + {"Created snapshot with root", "", "", check.NotNil}, + {"Created snapshot with root rootID", "", "", check.NotNil}, + {"Created snapshot with root rootID and ID\n snapID in 10ms", "", "", check.NotNil}, + {"Created snapshot with root rootID in 10ms", "", "", check.NotNil}, + {"Created snapshot and ID snapID in 10ms", "", "", check.NotNil}, + {"Created snapshot with ID snapID in 10ms", "", "", check.NotNil}, + {"Created snapshot snapID\n(root rootID) in 10.8362ms", "", "", check.NotNil}, + {"Created snapshot snapID in 10.8362ms", "", "", check.NotNil}, + {"Created snapshot (root rootID) in 10.8362ms", "", "", check.NotNil}, + {"Created snapshot root rootID in 10.8362ms", "", "", check.NotNil}, + {"Created snapshot root rootID and ID snapID in 10.8362ms", "", "", check.NotNil}, + {" root rootID and ID snapID in 10.8362ms", "", "", check.NotNil}, + {"uploaded snapshot beda41fb4ba7478025778fdc8312355c (root 
k23cf6d7ff418a0110636399da458abb5) in 10.8362ms", "", "", check.NotNil}, } { snapID, rootID, err := SnapshotIDsFromSnapshot(tc.log) - c.Check(snapID, Equals, tc.expectedSnapID, Commentf("Failed for log: %s", tc.log)) - c.Check(rootID, Equals, tc.expectedRootID, Commentf("Failed for log: %s", tc.log)) - c.Check(err, tc.errChecker, Commentf("Failed for log: %s", tc.log)) + c.Check(snapID, check.Equals, tc.expectedSnapID, check.Commentf("Failed for log: %s", tc.log)) + c.Check(rootID, check.Equals, tc.expectedRootID, check.Commentf("Failed for log: %s", tc.log)) + c.Check(err, tc.errChecker, check.Commentf("Failed for log: %s", tc.log)) } } -func (kParse *KopiaParseUtilsTestSuite) TestLatestSnapshotInfoFromManifestList(c *C) { +func (kParse *KopiaParseUtilsTestSuite) TestLatestSnapshotInfoFromManifestList(c *check.C) { for _, tc := range []struct { output string - checker Checker + checker check.Checker expectedSnapID string expectedBackupPath string }{ @@ -79,13 +79,13 @@ func (kParse *KopiaParseUtilsTestSuite) TestLatestSnapshotInfoFromManifestList(c ]`, expectedSnapID: "00000000000000000000003", expectedBackupPath: "/tmp/aaa3", - checker: IsNil, + checker: check.IsNil, }, { output: "", expectedSnapID: "", expectedBackupPath: "", - checker: NotNil, + checker: check.NotNil, }, { output: `[ @@ -93,7 +93,7 @@ func (kParse *KopiaParseUtilsTestSuite) TestLatestSnapshotInfoFromManifestList(c ]`, expectedSnapID: "", expectedBackupPath: "", - checker: NotNil, + checker: check.NotNil, }, { output: `[ @@ -101,20 +101,20 @@ func (kParse *KopiaParseUtilsTestSuite) TestLatestSnapshotInfoFromManifestList(c ]`, expectedSnapID: "", expectedBackupPath: "", - checker: NotNil, + checker: check.NotNil, }, } { snapID, backupPath, err := LatestSnapshotInfoFromManifestList(tc.output) c.Assert(err, tc.checker) - c.Assert(snapID, Equals, tc.expectedSnapID) - c.Assert(backupPath, Equals, tc.expectedBackupPath) + c.Assert(snapID, check.Equals, tc.expectedSnapID) + c.Assert(backupPath, check.Equals, tc.expectedBackupPath) } } -func (kParse *KopiaParseUtilsTestSuite) TestSnapshotInfoFromSnapshotCreateOutput(c *C) { +func (kParse *KopiaParseUtilsTestSuite) TestSnapshotInfoFromSnapshotCreateOutput(c *check.C) { for _, tc := range []struct { output string - checker Checker + checker check.Checker expectedSnapID string expectedRootID string }{ @@ -148,7 +148,7 @@ func (kParse *KopiaParseUtilsTestSuite) TestSnapshotInfoFromSnapshotCreateOutput } } `, - checker: IsNil, + checker: check.IsNil, expectedSnapID: "00000000000000000000001", expectedRootID: "ka68ba7abe0818b24a2b0647aeeb02f29", }, @@ -156,14 +156,14 @@ func (kParse *KopiaParseUtilsTestSuite) TestSnapshotInfoFromSnapshotCreateOutput output: `Snapshotting u2@h2:/tmp/aaa1 ... * 0 hashing, 1 hashed (2 B), 3 cached (4 B), uploaded 5 KB, estimating... 
`, - checker: NotNil, + checker: check.NotNil, expectedSnapID: "", expectedRootID: "", }, { output: `ERROR: unable to get local filesystem entry: resolveSymlink: stat: lstat /tmp/aaa2: no such file or directory `, - checker: NotNil, + checker: check.NotNil, expectedSnapID: "", expectedRootID: "", }, @@ -202,26 +202,26 @@ func (kParse *KopiaParseUtilsTestSuite) TestSnapshotInfoFromSnapshotCreateOutput } } }`, - checker: NotNil, + checker: check.NotNil, expectedSnapID: "", expectedRootID: "", }, } { snapID, rootID, err := SnapshotInfoFromSnapshotCreateOutput(tc.output) c.Assert(err, tc.checker) - c.Assert(snapID, Equals, tc.expectedSnapID) - c.Assert(rootID, Equals, tc.expectedRootID) + c.Assert(snapID, check.Equals, tc.expectedSnapID) + c.Assert(rootID, check.Equals, tc.expectedRootID) } } -func (kParse *KopiaParseUtilsTestSuite) TestSnapSizeStatsFromSnapListAll(c *C) { +func (kParse *KopiaParseUtilsTestSuite) TestSnapSizeStatsFromSnapListAll(c *check.C) { for _, tc := range []struct { description string - outputGenFunc func(*C, []*snapshot.Manifest) string + outputGenFunc func(*check.C, []*snapshot.Manifest) string expManifestList []*snapshot.Manifest expCount int expSize int64 - errChecker Checker + errChecker check.Checker }{ { description: "empty manifest list", @@ -229,7 +229,7 @@ func (kParse *KopiaParseUtilsTestSuite) TestSnapSizeStatsFromSnapListAll(c *C) { expManifestList: []*snapshot.Manifest{}, expCount: 0, expSize: 0, - errChecker: IsNil, + errChecker: check.IsNil, }, { description: "basic manifest list", @@ -245,7 +245,7 @@ func (kParse *KopiaParseUtilsTestSuite) TestSnapSizeStatsFromSnapListAll(c *C) { }, expCount: 1, expSize: 1, - errChecker: IsNil, + errChecker: check.IsNil, }, { description: "manifest list with multiple snapshots", @@ -282,7 +282,7 @@ func (kParse *KopiaParseUtilsTestSuite) TestSnapSizeStatsFromSnapListAll(c *C) { }, expCount: 4, expSize: 1111, - errChecker: IsNil, + errChecker: check.IsNil, }, { description: "error: snapshot with no directory summary, size is treated as zero", @@ -294,7 +294,7 @@ func (kParse *KopiaParseUtilsTestSuite) TestSnapSizeStatsFromSnapListAll(c *C) { }, expCount: 1, expSize: 0, - errChecker: IsNil, + errChecker: check.IsNil, }, { description: "error: snapshot with no root entry, size is treated as zero", @@ -304,37 +304,37 @@ func (kParse *KopiaParseUtilsTestSuite) TestSnapSizeStatsFromSnapListAll(c *C) { }, expCount: 1, expSize: 0, - errChecker: IsNil, + errChecker: check.IsNil, }, { description: "error: parse empty output", - outputGenFunc: func(c *C, manifestList []*snapshot.Manifest) string { + outputGenFunc: func(c *check.C, manifestList []*snapshot.Manifest) string { return "" }, expCount: 0, expSize: 0, - errChecker: NotNil, + errChecker: check.NotNil, }, { description: "error: unmarshal fails", - outputGenFunc: func(c *C, manifestList []*snapshot.Manifest) string { + outputGenFunc: func(c *check.C, manifestList []*snapshot.Manifest) string { return "asdf" }, expCount: 0, expSize: 0, - errChecker: NotNil, + errChecker: check.NotNil, }, } { outputToParse := tc.outputGenFunc(c, tc.expManifestList) gotTotSizeB, gotNumSnapshots, err := SnapSizeStatsFromSnapListAll(outputToParse) - c.Check(err, tc.errChecker, Commentf("Failed for output: %q", outputToParse)) - c.Check(gotTotSizeB, Equals, tc.expSize) - c.Check(gotNumSnapshots, Equals, tc.expCount) + c.Check(err, tc.errChecker, check.Commentf("Failed for output: %q", outputToParse)) + c.Check(gotTotSizeB, check.Equals, tc.expSize) + c.Check(gotNumSnapshots, check.Equals, 
tc.expCount) c.Log(err) } } -func (kParse *KopiaParseUtilsTestSuite) TestSnapshotStatsFromSnapshotCreate(c *C) { +func (kParse *KopiaParseUtilsTestSuite) TestSnapshotStatsFromSnapshotCreate(c *check.C) { type args struct { snapCreateOutput string matchOnlyFinished bool @@ -494,11 +494,11 @@ func (kParse *KopiaParseUtilsTestSuite) TestSnapshotStatsFromSnapshotCreate(c *C } for _, tt := range tests { stats := SnapshotStatsFromSnapshotCreate(tt.args.snapCreateOutput, tt.args.matchOnlyFinished) - c.Check(stats, DeepEquals, tt.wantStats, Commentf("Failed for %s", tt.name)) + c.Check(stats, check.DeepEquals, tt.wantStats, check.Commentf("Failed for %s", tt.name)) } } -func (kParse *KopiaParseUtilsTestSuite) TestRestoreStatsFromRestoreOutput(c *C) { +func (kParse *KopiaParseUtilsTestSuite) TestRestoreStatsFromRestoreOutput(c *check.C) { type args struct { restoreOutput string } @@ -556,82 +556,82 @@ func (kParse *KopiaParseUtilsTestSuite) TestRestoreStatsFromRestoreOutput(c *C) } for _, tt := range tests { stats := RestoreStatsFromRestoreOutput(tt.args.restoreOutput) - c.Check(stats, DeepEquals, tt.wantStats, Commentf("Failed for %s", tt.name)) + c.Check(stats, check.DeepEquals, tt.wantStats, check.Commentf("Failed for %s", tt.name)) } } -func (kParse *KopiaParseUtilsTestSuite) TestPhysicalSizeFromBlobStatsRaw(c *C) { +func (kParse *KopiaParseUtilsTestSuite) TestPhysicalSizeFromBlobStatsRaw(c *check.C) { for _, tc := range []struct { blobStatsOutput string expSizeVal int64 expCount int - errChecker Checker + errChecker check.Checker }{ { "Count: 813\nTotal: 11235\n", 11235, 813, - IsNil, + check.IsNil, }, { "Total: 11235\nCount: 813\n", 11235, 813, - IsNil, + check.IsNil, }, { "Count: 0\nTotal: 0\n", 0, 0, - IsNil, + check.IsNil, }, { "Count: 5\nTotal: 0.0\n", 0, 0, - NotNil, + check.NotNil, }, { "Count: 5\nTotal: asdf\n", 0, 0, - NotNil, + check.NotNil, }, { "Count: 5\nTotal: 11235,\n", 0, 0, - NotNil, + check.NotNil, }, { "Total: -11235\n", 0, 0, - NotNil, + check.NotNil, }, { "Total: 11235", 0, 0, - NotNil, + check.NotNil, }, { "Count: 11235", 0, 0, - NotNil, + check.NotNil, }, { "Other-field: 11235", 0, 0, - NotNil, + check.NotNil, }, { "random input that doesn't comply with expected format", 0, 0, - NotNil, + check.NotNil, }, { ` @@ -650,17 +650,17 @@ Histogram: 0 between 10000000 and 100000000 (total 0)`, 65628, 26, - IsNil, + check.IsNil, }, } { gotSize, gotCount, err := RepoSizeStatsFromBlobStatsRaw(tc.blobStatsOutput) - c.Check(err, tc.errChecker, Commentf("Failed for log: %s", tc.blobStatsOutput)) - c.Check(gotSize, Equals, tc.expSizeVal) - c.Check(gotCount, Equals, tc.expCount) + c.Check(err, tc.errChecker, check.Commentf("Failed for log: %s", tc.blobStatsOutput)) + c.Check(gotSize, check.Equals, tc.expSizeVal) + c.Check(gotCount, check.Equals, tc.expCount) } } -func (kParse *KopiaParseUtilsTestSuite) TestIsEqualSnapshotCreateStats(c *C) { +func (kParse *KopiaParseUtilsTestSuite) TestIsEqualSnapshotCreateStats(c *check.C) { for _, tc := range []struct { description string a *SnapshotCreateStats @@ -747,11 +747,11 @@ func (kParse *KopiaParseUtilsTestSuite) TestIsEqualSnapshotCreateStats(c *C) { }, } { result := IsEqualSnapshotCreateStats(tc.a, tc.b) - c.Check(result, Equals, tc.expResult) + c.Check(result, check.Equals, tc.expResult) } } -func (kParse *KopiaParseUtilsTestSuite) TestErrorsFromOutput(c *C) { +func (kParse *KopiaParseUtilsTestSuite) TestErrorsFromOutput(c *check.C) { for caseIdx, tc := range []struct { log string expectedErrors []string @@ -779,68 +779,68 @@ func (kParse 
*KopiaParseUtilsTestSuite) TestErrorsFromOutput(c *C) { {"error restoring: restore error: error copying: copy file: error creating file:", []string{"restoring: restore error: error copying: copy file: error creating file:"}}, } { errs := ErrorsFromOutput(tc.log) - fc := Commentf("Failed for case #%v. Log: %s", caseIdx, tc.log) - c.Check(len(errs), Equals, len(tc.expectedErrors), fc) + fc := check.Commentf("Failed for case #%v. Log: %s", caseIdx, tc.log) + c.Check(len(errs), check.Equals, len(tc.expectedErrors), fc) for i, e := range errs { - c.Check(e.Error(), Equals, tc.expectedErrors[i], fc) + c.Check(e.Error(), check.Equals, tc.expectedErrors[i], fc) } } } -func (kParse *KopiaParseUtilsTestSuite) TestParsePolicyShow(c *C) { +func (kParse *KopiaParseUtilsTestSuite) TestParsePolicyShow(c *check.C) { for _, tc := range []struct { description string - outputGenFunc func(*C, policy.Policy) string + outputGenFunc func(*check.C, policy.Policy) string expPolicyShow policy.Policy - errChecker Checker + errChecker check.Checker }{ { description: "empty policy show", outputGenFunc: marshalPolicy, expPolicyShow: policy.Policy{}, - errChecker: IsNil, + errChecker: check.IsNil, }, { description: "default policy show", outputGenFunc: marshalPolicy, expPolicyShow: *policy.DefaultPolicy, - errChecker: IsNil, + errChecker: check.IsNil, }, { description: "error: parse empty output", - outputGenFunc: func(*C, policy.Policy) string { + outputGenFunc: func(*check.C, policy.Policy) string { return "" }, - errChecker: NotNil, + errChecker: check.NotNil, }, { description: "error: unmarshal fails", - outputGenFunc: func(*C, policy.Policy) string { + outputGenFunc: func(*check.C, policy.Policy) string { return "asdf" }, - errChecker: NotNil, + errChecker: check.NotNil, }, } { outputToParse := tc.outputGenFunc(c, tc.expPolicyShow) gotPolicy, err := ParsePolicyShow(outputToParse) - c.Check(err, tc.errChecker, Commentf("Failed for output: %q", outputToParse)) + c.Check(err, tc.errChecker, check.Commentf("Failed for output: %q", outputToParse)) c.Log(err) - c.Check(gotPolicy, DeepEquals, tc.expPolicyShow) + c.Check(gotPolicy, check.DeepEquals, tc.expPolicyShow) } } -func marshalManifestList(c *C, manifestList []*snapshot.Manifest) string { - c.Assert(manifestList, NotNil) +func marshalManifestList(c *check.C, manifestList []*snapshot.Manifest) string { + c.Assert(manifestList, check.NotNil) b, err := json.Marshal(manifestList) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) return string(b) } -func marshalPolicy(c *C, policy policy.Policy) string { +func marshalPolicy(c *check.C, policy policy.Policy) string { b, err := json.Marshal(policy) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) return string(b) } diff --git a/pkg/kopia/command/policy_test.go b/pkg/kopia/command/policy_test.go index 952102be6c..a3f96a05d4 100644 --- a/pkg/kopia/command/policy_test.go +++ b/pkg/kopia/command/policy_test.go @@ -17,14 +17,14 @@ package command import ( "strings" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" ) type KopiaPolicyTestSuite struct{} -var _ = Suite(&KopiaPolicyTestSuite{}) +var _ = check.Suite(&KopiaPolicyTestSuite{}) -func (kPolicy *KopiaPolicyTestSuite) TestPolicySetCommands(c *C) { +func (kPolicy *KopiaPolicyTestSuite) TestPolicySetCommands(c *check.C) { for _, tc := range []struct { f func() []string expectedLog string @@ -45,11 +45,11 @@ func (kPolicy *KopiaPolicyTestSuite) TestPolicySetCommands(c *C) { }, } { cmd := strings.Join(tc.f(), " ") - c.Check(cmd, Equals, tc.expectedLog) + c.Check(cmd, check.Equals, tc.expectedLog) } } -func (kPolicy *KopiaPolicyTestSuite) TestPolicyShowCommands(c *C) { +func (kPolicy *KopiaPolicyTestSuite) TestPolicyShowCommands(c *check.C) { for _, tc := range []struct { f func() []string expectedLog string @@ -70,6 +70,6 @@ func (kPolicy *KopiaPolicyTestSuite) TestPolicyShowCommands(c *C) { }, } { cmd := strings.Join(tc.f(), " ") - c.Check(cmd, Equals, tc.expectedLog) + c.Check(cmd, check.Equals, tc.expectedLog) } } diff --git a/pkg/kopia/command/restore_test.go b/pkg/kopia/command/restore_test.go index 9fcb8556a2..55359da834 100644 --- a/pkg/kopia/command/restore_test.go +++ b/pkg/kopia/command/restore_test.go @@ -17,14 +17,14 @@ package command import ( "strings" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) type KopiaRestoreTestSuite struct{} -var _ = Suite(&KopiaRestoreTestSuite{}) +var _ = check.Suite(&KopiaRestoreTestSuite{}) -func (kRestore *KopiaRestoreTestSuite) TestRestoreCommands(c *C) { +func (kRestore *KopiaRestoreTestSuite) TestRestoreCommands(c *check.C) { for _, tc := range []struct { f func() []string expectedLog string @@ -79,6 +79,6 @@ func (kRestore *KopiaRestoreTestSuite) TestRestoreCommands(c *C) { }, } { cmd := strings.Join(tc.f(), " ") - c.Check(cmd, Equals, tc.expectedLog) + c.Check(cmd, check.Equals, tc.expectedLog) } } diff --git a/pkg/kopia/command/server_test.go b/pkg/kopia/command/server_test.go index ace89aee41..4d03ecb910 100644 --- a/pkg/kopia/command/server_test.go +++ b/pkg/kopia/command/server_test.go @@ -17,16 +17,16 @@ package command import ( "strings" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/kopia/cli/args" ) type KopiaServerTestSuite struct{} -var _ = Suite(&KopiaServerTestSuite{}) +var _ = check.Suite(&KopiaServerTestSuite{}) -func (kServer *KopiaServerTestSuite) TestServerCommands(c *C) { +func (kServer *KopiaServerTestSuite) TestServerCommands(c *check.C) { commandArgs := &CommandArgs{ RepoPassword: "encr-key", ConfigFilePath: "path/kopia.config", @@ -277,6 +277,6 @@ func (kServer *KopiaServerTestSuite) TestServerCommands(c *C) { }, } { cmd := strings.Join(tc.f(), " ") - c.Check(cmd, Equals, tc.expectedLog) + c.Check(cmd, check.Equals, tc.expectedLog) } } diff --git a/pkg/kopia/command/snapshot_test.go b/pkg/kopia/command/snapshot_test.go index bc62e4e0ed..a76cacb6d6 100644 --- a/pkg/kopia/command/snapshot_test.go +++ b/pkg/kopia/command/snapshot_test.go @@ -18,14 +18,14 @@ import ( "strings" "time" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" ) type KopiaSnapshotTestSuite struct{} -var _ = Suite(&KopiaSnapshotTestSuite{}) +var _ = check.Suite(&KopiaSnapshotTestSuite{}) -func (kSnapshot *KopiaSnapshotTestSuite) TestSnapshotCommands(c *C) { +func (kSnapshot *KopiaSnapshotTestSuite) TestSnapshotCommands(c *check.C) { commandArgs := &CommandArgs{ RepoPassword: "encr-key", ConfigFilePath: "path/kopia.config", @@ -149,6 +149,6 @@ func (kSnapshot *KopiaSnapshotTestSuite) TestSnapshotCommands(c *C) { }, } { cmd := strings.Join(tc.f(), " ") - c.Check(cmd, Equals, tc.expectedLog) + c.Check(cmd, check.Equals, tc.expectedLog) } } diff --git a/pkg/kopia/errors/utils_test.go b/pkg/kopia/errors/utils_test.go index 164d3d79da..e4370f0406 100644 --- a/pkg/kopia/errors/utils_test.go +++ b/pkg/kopia/errors/utils_test.go @@ -19,31 +19,31 @@ import ( "github.com/kanisterio/errkit" "github.com/pkg/errors" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. -func TestKopiaErrors(t *testing.T) { TestingT(t) } +func TestKopiaErrors(t *testing.T) { check.TestingT(t) } type KopiaErrorsTestSuite struct{} -var _ = Suite(&KopiaErrorsTestSuite{}) +var _ = check.Suite(&KopiaErrorsTestSuite{}) // TestErrCheck verifies that error types are properly detected after wrapping them -func (s *KopiaErrorsTestSuite) TestErrCheck(c *C) { +func (s *KopiaErrorsTestSuite) TestErrCheck(c *check.C) { origErr := errors.New("Some error") errWithMessage := errors.WithMessage(origErr, ErrInvalidPasswordStr) errWrapped := errkit.Wrap(origErr, ErrInvalidPasswordStr) - c.Assert(IsInvalidPasswordError(errWithMessage), Equals, true) - c.Assert(IsInvalidPasswordError(errWrapped), Equals, true) - c.Assert(IsRepoNotFoundError(errWrapped), Equals, false) + c.Assert(IsInvalidPasswordError(errWithMessage), check.Equals, true) + c.Assert(IsInvalidPasswordError(errWrapped), check.Equals, true) + c.Assert(IsRepoNotFoundError(errWrapped), check.Equals, false) permittedErrors := []ErrorType{ErrorInvalidPassword, ErrorRepoNotFound} - c.Assert(CheckKopiaErrors(errWithMessage, permittedErrors), Equals, true) - c.Assert(CheckKopiaErrors(errWrapped, permittedErrors), Equals, true) + c.Assert(CheckKopiaErrors(errWithMessage, permittedErrors), check.Equals, true) + c.Assert(CheckKopiaErrors(errWrapped, permittedErrors), check.Equals, true) wrongErrors := []ErrorType{ErrorRepoNotFound} - c.Assert(CheckKopiaErrors(errWrapped, wrongErrors), Equals, false) + c.Assert(CheckKopiaErrors(errWrapped, wrongErrors), check.Equals, false) } diff --git a/pkg/kopia/maintenance/get_maintenance_owner_test.go b/pkg/kopia/maintenance/get_maintenance_owner_test.go index 76a9fbd92a..78b7e149aa 100644 --- a/pkg/kopia/maintenance/get_maintenance_owner_test.go +++ b/pkg/kopia/maintenance/get_maintenance_owner_test.go @@ -21,17 +21,17 @@ import ( kopiacli "github.com/kopia/kopia/cli" "github.com/kopia/kopia/repo/maintenance" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. 
-func TestKopiaMaintenanceWrappers(t *testing.T) { TestingT(t) } +func TestKopiaMaintenanceWrappers(t *testing.T) { check.TestingT(t) } type KopiaMaintenanceOwnerTestSuite struct{} -var _ = Suite(&KopiaMaintenanceOwnerTestSuite{}) +var _ = check.Suite(&KopiaMaintenanceOwnerTestSuite{}) -func (kMaintenanceOwner *KopiaMaintenanceOwnerTestSuite) TestParseMaintenanceOwnerOutput(c *C) { +func (kMaintenanceOwner *KopiaMaintenanceOwnerTestSuite) TestParseMaintenanceOwnerOutput(c *check.C) { maintInfoResult := kopiacli.MaintenanceInfo{ Params: maintenance.Params{ Owner: "owner@hostname", @@ -68,29 +68,29 @@ func (kMaintenanceOwner *KopiaMaintenanceOwnerTestSuite) TestParseMaintenanceOwn }, } maintOutput, err := json.Marshal(maintInfoResult) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, tc := range []struct { desc string output []byte expectedOwner string - expectedErr Checker + expectedErr check.Checker }{ { desc: "empty output", output: []byte{}, expectedOwner: "", - expectedErr: NotNil, + expectedErr: check.NotNil, }, { desc: "maintenance output", output: maintOutput, expectedOwner: "owner@hostname", - expectedErr: IsNil, + expectedErr: check.IsNil, }, } { owner, err := parseOwner(tc.output) - c.Assert(err, tc.expectedErr, Commentf("Case: %s", tc.desc)) - c.Assert(owner, Equals, tc.expectedOwner, Commentf("Case: %s", tc.desc)) + c.Assert(err, tc.expectedErr, check.Commentf("Case: %s", tc.desc)) + c.Assert(owner, check.Equals, tc.expectedOwner, check.Commentf("Case: %s", tc.desc)) } } diff --git a/pkg/kopia/repository/client.go b/pkg/kopia/repository/client.go index 49ed82e61c..99205d1fe8 100644 --- a/pkg/kopia/repository/client.go +++ b/pkg/kopia/repository/client.go @@ -16,6 +16,7 @@ package repository import ( "context" + "fmt" "os" "path/filepath" "strings" @@ -41,6 +42,33 @@ const ( maxConnectionRetries = 100 ) +// AccessMode defines the types of access levels for a Kopia repository. +type AccessMode int + +const ( + // ReadOnlyAccess indicates that the repository access level is read-only (no modifications expected). + ReadOnlyAccess AccessMode = iota + // WriteAccess indicates that the repository access level allows modifications. + WriteAccess +) + +// IsReadOnly returns true if repository access mode is Read-only. +func (m AccessMode) IsReadOnly() bool { + return m == ReadOnlyAccess +} + +// String returns a string representation of the RepositoryAccessMode type. 
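// Illustrative aside, not part of the diff: expected behaviour of the new AccessMode
// type added in pkg/kopia/repository. ConnectToAPIServer gains an accessMode argument
// (see the hunk below) and marks the Kopia client connection read-only when
// accessMode.IsReadOnly() is true. This is only a sketch; the AccessMode identifiers
// are taken from the diff, everything else is placeholder.
package main

import (
	"fmt"

	"github.com/kanisterio/kanister/pkg/kopia/repository"
)

func main() {
	mode := repository.ReadOnlyAccess
	fmt.Println(mode, mode.IsReadOnly()) // ReadOnlyAccess true

	mode = repository.WriteAccess
	fmt.Println(mode, mode.IsReadOnly()) // WriteAccess false
}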
+func (m AccessMode) String() string { + switch m { + case ReadOnlyAccess: + return "ReadOnlyAccess" + case WriteAccess: + return "WriteAccess" + default: + return fmt.Sprintf("RepositoryAccessMode(%d)", m) + } +} + var apiConnectBackoff = backoff.Backoff{ Factor: 2, Jitter: false, @@ -58,6 +86,7 @@ func ConnectToAPIServer( username string, contentCacheMB, metadataCacheMB int, + accessMode AccessMode, ) error { // Extra fingerprint from the TLS Certificate secret fingerprint, err := kopia.ExtractFingerprintFromCertificate(tlsCert) @@ -83,6 +112,7 @@ func ConnectToAPIServer( ClientOptions: repo.ClientOptions{ Hostname: hostname, Username: username, + ReadOnly: accessMode.IsReadOnly(), }, } diff --git a/pkg/kopia/repository/client_test.go b/pkg/kopia/repository/client_test.go index f6eef44de1..d060729fcf 100644 --- a/pkg/kopia/repository/client_test.go +++ b/pkg/kopia/repository/client_test.go @@ -3,9 +3,8 @@ package repository_test import ( "testing" - "gopkg.in/check.v1" - "github.com/kopia/kopia/repo" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/kopia/repository" ) diff --git a/pkg/ksprig/fipsonly_sprig_test.go b/pkg/ksprig/fipsonly_sprig_test.go index 2de1e33905..7a2ea4968c 100644 --- a/pkg/ksprig/fipsonly_sprig_test.go +++ b/pkg/ksprig/fipsonly_sprig_test.go @@ -20,18 +20,18 @@ import ( "testing" "text/template" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/ksprig" ) type FipsOnlySprigSuite struct{} -var _ = Suite(&FipsOnlySprigSuite{}) +var _ = check.Suite(&FipsOnlySprigSuite{}) -func TestFipsOnlySprigSuite(t *testing.T) { TestingT(t) } +func TestFipsOnlySprigSuite(t *testing.T) { check.TestingT(t) } -func (f *FipsOnlySprigSuite) TestUnsupportedTxtFuncMapUsage(c *C) { +func (f *FipsOnlySprigSuite) TestUnsupportedTxtFuncMapUsage(c *check.C) { funcMap := ksprig.TxtFuncMap() testCases := []struct { @@ -69,17 +69,17 @@ func (f *FipsOnlySprigSuite) TestUnsupportedTxtFuncMapUsage(c *C) { c.Logf("Testing %s", tc.function) temp, err := template.New("test").Funcs(funcMap).Parse(tc.templateText) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = temp.Execute(nil, "") var sprigErr ksprig.UnsupportedSprigUsageErr - c.Assert(errors.As(err, &sprigErr), Equals, true) - c.Assert(sprigErr.Usage, Equals, tc.usageErr) + c.Assert(errors.As(err, &sprigErr), check.Equals, true) + c.Assert(sprigErr.Usage, check.Equals, tc.usageErr) } } -func (f *FipsOnlySprigSuite) TestSupportedTxtFuncMapUsage(c *C) { +func (f *FipsOnlySprigSuite) TestSupportedTxtFuncMapUsage(c *check.C) { funcMap := ksprig.TxtFuncMap() testCases := []struct { @@ -113,9 +113,9 @@ func (f *FipsOnlySprigSuite) TestSupportedTxtFuncMapUsage(c *C) { c.Logf("Testing %s", tc.description) temp, err := template.New("test").Funcs(funcMap).Parse(tc.templateText) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = temp.Execute(&strings.Builder{}, "") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } diff --git a/pkg/kube/exec_test.go b/pkg/kube/exec_test.go index 0c5990e2a4..9242abb4ec 100644 --- a/pkg/kube/exec_test.go +++ b/pkg/kube/exec_test.go @@ -24,7 +24,7 @@ import ( "strings" "time" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -36,20 +36,20 @@ type ExecSuite struct { pod *corev1.Pod } -var _ = Suite(&ExecSuite{}) +var _ = check.Suite(&ExecSuite{}) -func (s *ExecSuite) SetUpSuite(c *C) { +func (s *ExecSuite) SetUpSuite(c *check.C) { ctx := context.Background() var err error s.cli, err = NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "exectest-", }, } ns, err = s.cli.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = ns.Name pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "testpod"}, @@ -64,50 +64,50 @@ func (s *ExecSuite) SetUpSuite(c *C) { }, } s.pod, err = s.cli.CoreV1().Pods(s.namespace).Create(ctx, pod, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctxTimeout, cancel := context.WithTimeout(ctx, 5*time.Minute) defer cancel() - c.Assert(WaitForPodReady(ctxTimeout, s.cli, s.namespace, s.pod.Name), IsNil) + c.Assert(WaitForPodReady(ctxTimeout, s.cli, s.namespace, s.pod.Name), check.IsNil) s.pod, err = s.cli.CoreV1().Pods(s.namespace).Get(ctx, s.pod.Name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *ExecSuite) TearDownSuite(c *C) { +func (s *ExecSuite) TearDownSuite(c *check.C) { if s.namespace != "" { err := s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } -func (s *ExecSuite) TestStderr(c *C) { +func (s *ExecSuite) TestStderr(c *check.C) { cmd := []string{"sh", "-c", "echo -n hello >&2"} for _, cs := range s.pod.Status.ContainerStatuses { stdout, stderr, err := Exec(context.Background(), s.cli, s.pod.Namespace, s.pod.Name, cs.Name, cmd, nil) - c.Assert(err, IsNil) - c.Assert(stdout, Equals, "") - c.Assert(stderr, Equals, "hello") + c.Assert(err, check.IsNil) + c.Assert(stdout, check.Equals, "") + c.Assert(stderr, check.Equals, "hello") } cmd = []string{"sh", "-c", "echo -n hello && exit 1"} for _, cs := range s.pod.Status.ContainerStatuses { stdout, stderr, err := Exec(context.Background(), s.cli, s.pod.Namespace, s.pod.Name, cs.Name, cmd, nil) - c.Assert(err, NotNil) - c.Assert(stdout, Equals, "hello") - c.Assert(stderr, Equals, "") + c.Assert(err, check.NotNil) + c.Assert(stdout, check.Equals, "hello") + c.Assert(stderr, check.Equals, "") } cmd = []string{"sh", "-c", "count=0; while true; do printf $count; let count=$count+1; if [ $count -eq 6 ]; then exit 1; fi; done"} for _, cs := range s.pod.Status.ContainerStatuses { stdout, stderr, err := Exec(context.Background(), s.cli, s.pod.Namespace, s.pod.Name, cs.Name, cmd, nil) - c.Assert(err, NotNil) - c.Assert(stdout, Equals, "012345") - c.Assert(stderr, Equals, "") + c.Assert(err, check.NotNil) + c.Assert(stdout, check.Equals, "012345") + c.Assert(stderr, check.Equals, "") } } -func (s *ExecSuite) TestExecWithWriterOptions(c *C) { - c.Assert(s.pod.Status.Phase, Equals, corev1.PodRunning) - c.Assert(len(s.pod.Status.ContainerStatuses) > 0, Equals, true) +func (s *ExecSuite) TestExecWithWriterOptions(c *check.C) { + c.Assert(s.pod.Status.Phase, check.Equals, corev1.PodRunning) + c.Assert(len(s.pod.Status.ContainerStatuses) > 0, check.Equals, true) var testCases = []struct { cmd []string @@ -140,15 +140,15 @@ func (s *ExecSuite) TestExecWithWriterOptions(c *C) { Stderr: buferr, } err := 
ExecWithOptions(context.Background(), s.cli, opts) - c.Assert(err, IsNil) - c.Assert(bufout.String(), Equals, testCase.expectedOut) - c.Assert(buferr.String(), Equals, testCase.expectedErr) + c.Assert(err, check.IsNil) + c.Assert(bufout.String(), check.Equals, testCase.expectedOut) + c.Assert(buferr.String(), check.Equals, testCase.expectedErr) } } -func (s *ExecSuite) TestErrorInExecWithOptions(c *C) { - c.Assert(s.pod.Status.Phase, Equals, corev1.PodRunning) - c.Assert(len(s.pod.Status.ContainerStatuses) > 0, Equals, true) +func (s *ExecSuite) TestErrorInExecWithOptions(c *check.C) { + c.Assert(s.pod.Status.Phase, check.Equals, corev1.PodRunning) + c.Assert(len(s.pod.Status.ContainerStatuses) > 0, check.Equals, true) var testCases = []struct { cmd []string @@ -188,14 +188,14 @@ func (s *ExecSuite) TestErrorInExecWithOptions(c *C) { Stdin: nil, } err1 := ExecWithOptions(context.Background(), s.cli, opts) - c.Assert(err1, Not(IsNil)) + c.Assert(err1, check.Not(check.IsNil)) var ee1 *ExecError ok := errors.As(err1, &ee1) - c.Assert(ok, Equals, true) - c.Assert(ee1.Stdout(), Not(Equals), testCase.expectedOut) - c.Assert(ee1.Stderr(), Not(Equals), testCase.expectedErr) - c.Assert(ee1.Error(), Equals, testCase.expectedText) + c.Assert(ok, check.Equals, true) + c.Assert(ee1.Stdout(), check.Not(check.Equals), testCase.expectedOut) + c.Assert(ee1.Stderr(), check.Not(check.Equals), testCase.expectedErr) + c.Assert(ee1.Error(), check.Equals, testCase.expectedText) // Now try the same with passing buffers for stdout and stderr // This should not affect returned error @@ -205,58 +205,58 @@ func (s *ExecSuite) TestErrorInExecWithOptions(c *C) { opts.Stderr = &buferr err2 := ExecWithOptions(context.Background(), s.cli, opts) - c.Assert(err2, Not(IsNil)) + c.Assert(err2, check.Not(check.IsNil)) var ee2 *ExecError ok = errors.As(err2, &ee2) - c.Assert(ok, Equals, true) + c.Assert(ok, check.Equals, true) // When error happens, stdout/stderr buffers should contain all lines produced by an app - c.Assert(bufout.String(), Equals, strings.Join(testCase.expectedOut, "\n")) - c.Assert(buferr.String(), Equals, strings.Join(testCase.expectedErr, "\n")) + c.Assert(bufout.String(), check.Equals, strings.Join(testCase.expectedOut, "\n")) + c.Assert(buferr.String(), check.Equals, strings.Join(testCase.expectedErr, "\n")) // When error happens, ExecError should contain only last ten lines of stdout/stderr - c.Assert(ee2.Stdout(), Equals, strings.Join(getSliceTail(testCase.expectedOut, logTailDefaultLength), "\r\n")) - c.Assert(ee2.Stderr(), Equals, strings.Join(getSliceTail(testCase.expectedErr, logTailDefaultLength), "\r\n")) + c.Assert(ee2.Stdout(), check.Equals, strings.Join(getSliceTail(testCase.expectedOut, logTailDefaultLength), "\r\n")) + c.Assert(ee2.Stderr(), check.Equals, strings.Join(getSliceTail(testCase.expectedErr, logTailDefaultLength), "\r\n")) // When error happens, ExecError should include stdout/stderr into its text representation - c.Assert(ee2.Error(), Equals, testCase.expectedText) + c.Assert(ee2.Error(), check.Equals, testCase.expectedText) } } -func (s *ExecSuite) TestExecEcho(c *C) { +func (s *ExecSuite) TestExecEcho(c *check.C) { cmd := []string{"sh", "-c", "cat -"} - c.Assert(s.pod.Status.Phase, Equals, corev1.PodRunning) - c.Assert(len(s.pod.Status.ContainerStatuses) > 0, Equals, true) + c.Assert(s.pod.Status.Phase, check.Equals, corev1.PodRunning) + c.Assert(len(s.pod.Status.ContainerStatuses) > 0, check.Equals, true) for _, cs := range s.pod.Status.ContainerStatuses { stdout, stderr, err 
:= Exec(context.Background(), s.cli, s.pod.Namespace, s.pod.Name, cs.Name, cmd, bytes.NewBufferString("badabing")) - c.Assert(err, IsNil) - c.Assert(stdout, Equals, "badabing") - c.Assert(stderr, Equals, "") + c.Assert(err, check.IsNil) + c.Assert(stdout, check.Equals, "badabing") + c.Assert(stderr, check.Equals, "") } } -func (s *ExecSuite) TestExecEchoDefaultContainer(c *C) { +func (s *ExecSuite) TestExecEchoDefaultContainer(c *check.C) { cmd := []string{"sh", "-c", "cat -"} - c.Assert(s.pod.Status.Phase, Equals, corev1.PodRunning) - c.Assert(len(s.pod.Status.ContainerStatuses) > 0, Equals, true) + c.Assert(s.pod.Status.Phase, check.Equals, corev1.PodRunning) + c.Assert(len(s.pod.Status.ContainerStatuses) > 0, check.Equals, true) stdout, stderr, err := Exec(context.Background(), s.cli, s.pod.Namespace, s.pod.Name, "", cmd, bytes.NewBufferString("badabing")) - c.Assert(err, IsNil) - c.Assert(stdout, Equals, "badabing") - c.Assert(stderr, Equals, "") + c.Assert(err, check.IsNil) + c.Assert(stdout, check.Equals, "badabing") + c.Assert(stderr, check.Equals, "") } -func (s *ExecSuite) TestLSWithoutStdIn(c *C) { +func (s *ExecSuite) TestLSWithoutStdIn(c *check.C) { cmd := []string{"ls", "-l", "/home"} - c.Assert(s.pod.Status.Phase, Equals, corev1.PodRunning) - c.Assert(len(s.pod.Status.ContainerStatuses) > 0, Equals, true) + c.Assert(s.pod.Status.Phase, check.Equals, corev1.PodRunning) + c.Assert(len(s.pod.Status.ContainerStatuses) > 0, check.Equals, true) stdout, stderr, err := Exec(context.Background(), s.cli, s.pod.Namespace, s.pod.Name, "", cmd, nil) - c.Assert(err, IsNil) - c.Assert(stdout, Equals, "total 0") - c.Assert(stderr, Equals, "") + c.Assert(err, check.IsNil) + c.Assert(stdout, check.Equals, "total 0") + c.Assert(stderr, check.Equals, "") } -func (s *ExecSuite) TestKopiaCommand(c *C) { +func (s *ExecSuite) TestKopiaCommand(c *check.C) { ctx := context.Background() pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -273,50 +273,50 @@ func (s *ExecSuite) TestKopiaCommand(c *C) { }, } p, err := s.cli.CoreV1().Pods(s.namespace).Create(ctx, pod, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err := s.cli.CoreV1().Pods(s.namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() ctxT, cancel := context.WithTimeout(ctx, 5*time.Minute) defer cancel() - c.Assert(WaitForPodReady(ctxT, s.cli, s.namespace, p.Name), IsNil) + c.Assert(WaitForPodReady(ctxT, s.cli, s.namespace, p.Name), check.IsNil) // up until now below is how we were used to run kopia commands // "bash" "-c" "kopia repository create filesystem --path=$HOME/kopia_repo --password=newpass" // but now we don't want `bash -c` cmd := []string{"kopia", "repository", "create", "filesystem", "--path=$HOME/kopia_repo", "--password=newpass"} stdout, stderr, err := Exec(context.Background(), s.cli, pod.Namespace, pod.Name, "", cmd, nil) - c.Assert(err, IsNil) - c.Assert(strings.Contains(stdout, "Policy for (global):"), Equals, true) - c.Assert(strings.Contains(stderr, "Initializing repository with:"), Equals, true) + c.Assert(err, check.IsNil) + c.Assert(strings.Contains(stdout, "Policy for (global):"), check.Equals, true) + c.Assert(strings.Contains(stderr, "Initializing repository with:"), check.Equals, true) } // TestContextTimeout verifies that when context is cancelled during command execution, // execution will be interrupted and proper error will be returned. The stdout, stderr streams should be captured. 
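// Illustrative aside, not part of the diff: how a caller of kube.Exec observes the
// behaviour these timeout tests verify. When the context deadline fires mid-command,
// Exec returns an error mentioning the context deadline, while stdout/stderr still
// carry whatever the command printed before the cutoff. Namespace, pod, and container
// names below are placeholders.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/kanisterio/kanister/pkg/kube"
)

func main() {
	cli, err := kube.NewClient()
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	cmd := []string{"sh", "-c", "echo abc && sleep 2 && echo def"}
	stdout, stderr, err := kube.Exec(ctx, cli, "default", "testpod", "", cmd, nil)
	// err is non-nil (deadline exceeded); stdout holds the partial output "abc".
	fmt.Printf("stdout=%q stderr=%q err=%v\n", stdout, stderr, err)
}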
-func (s *ExecSuite) TestContextTimeout(c *C) { +func (s *ExecSuite) TestContextTimeout(c *check.C) { ctx, cancel := context.WithTimeout(context.Background(), 1000*time.Millisecond) defer cancel() cmd := []string{"sh", "-c", "echo abc && sleep 2 && echo def"} for _, cs := range s.pod.Status.ContainerStatuses { stdout, stderr, err := Exec(ctx, s.cli, s.pod.Namespace, s.pod.Name, cs.Name, cmd, nil) - c.Assert(err, NotNil) - c.Assert(stdout, Equals, "abc") - c.Assert(stderr, Equals, "") - c.Assert(err.Error(), Equals, "Failed to exec command in pod: context deadline exceeded.\nstdout: abc\nstderr: ") + c.Assert(err, check.NotNil) + c.Assert(stdout, check.Equals, "abc") + c.Assert(stderr, check.Equals, "") + c.Assert(err.Error(), check.Equals, "Failed to exec command in pod: context deadline exceeded.\nstdout: abc\nstderr: ") } } // TestCancelledContext verifies that when execution is proceeded with context which is already cancelled, // proper error will be returned. The stdout, stderr streams should remain empty, because command has not been executed. -func (s *ExecSuite) TestCancelledContext(c *C) { +func (s *ExecSuite) TestCancelledContext(c *check.C) { ctx, cancel := context.WithCancel(context.Background()) cancel() cmd := []string{"sh", "-c", "echo abc && sleep 2"} for _, cs := range s.pod.Status.ContainerStatuses { stdout, stderr, err := Exec(ctx, s.cli, s.pod.Namespace, s.pod.Name, cs.Name, cmd, nil) - c.Assert(err, NotNil) - c.Assert(stdout, Equals, "") - c.Assert(stderr, Equals, "") - c.Assert(err.Error(), Matches, "Failed to exec command in pod: error sending request: Post \".*\": .*: operation was canceled.\nstdout: \nstderr: ") + c.Assert(err, check.NotNil) + c.Assert(stdout, check.Equals, "") + c.Assert(stderr, check.Equals, "") + c.Assert(err.Error(), check.Matches, "Failed to exec command in pod: error sending request: Post \".*\": .*: operation was canceled.\nstdout: \nstderr: ") } } diff --git a/pkg/kube/fips_test.go b/pkg/kube/fips_test.go index e41cc52a53..7999cae8b1 100644 --- a/pkg/kube/fips_test.go +++ b/pkg/kube/fips_test.go @@ -23,7 +23,7 @@ import ( "strings" "time" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -35,20 +35,20 @@ type FIPSSuite struct { pod *corev1.Pod } -var _ = Suite(&FIPSSuite{}) +var _ = check.Suite(&FIPSSuite{}) -func (s *FIPSSuite) SetUpSuite(c *C) { +func (s *FIPSSuite) SetUpSuite(c *check.C) { ctx := context.Background() var err error s.cli, err = NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "fipstest-", }, } ns, err = s.cli.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = ns.Name pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "testpod"}, @@ -63,30 +63,30 @@ func (s *FIPSSuite) SetUpSuite(c *C) { }, } s.pod, err = s.cli.CoreV1().Pods(s.namespace).Create(ctx, pod, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctxTimeout, cancel := context.WithTimeout(ctx, 5*time.Minute) defer cancel() - c.Assert(WaitForPodReady(ctxTimeout, s.cli, s.namespace, s.pod.Name), IsNil) + c.Assert(WaitForPodReady(ctxTimeout, s.cli, s.namespace, s.pod.Name), check.IsNil) s.pod, err = s.cli.CoreV1().Pods(s.namespace).Get(ctx, s.pod.Name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // install go in kanister-tools pod cmd := []string{"microdnf", "install", "-y", "go"} for _, cs := range s.pod.Status.ContainerStatuses { stdout, stderr, err := Exec(ctx, s.cli, s.pod.Namespace, s.pod.Name, cs.Name, cmd, nil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) c.Log(stderr) c.Log(stdout) } } -func (s *FIPSSuite) TearDownSuite(c *C) { +func (s *FIPSSuite) TearDownSuite(c *check.C) { if s.namespace != "" { err := s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } -func (s *FIPSSuite) TestFIPSBoringEnabled(c *C) { +func (s *FIPSSuite) TestFIPSBoringEnabled(c *check.C) { for _, tool := range []string{ "/usr/local/bin/kopia", "/usr/local/bin/kando", @@ -95,8 +95,8 @@ func (s *FIPSSuite) TestFIPSBoringEnabled(c *C) { cmd := []string{"go", "tool", "nm", tool} for _, cs := range s.pod.Status.ContainerStatuses { stdout, stderr, err := Exec(context.Background(), s.cli, s.pod.Namespace, s.pod.Name, cs.Name, cmd, nil) - c.Assert(err, IsNil) - c.Assert(stderr, Equals, "") + c.Assert(err, check.IsNil) + c.Assert(stderr, check.Equals, "") scanner := bufio.NewScanner(strings.NewReader(stdout)) fipsModeSet := false for scanner.Scan() { @@ -107,7 +107,7 @@ func (s *FIPSSuite) TestFIPSBoringEnabled(c *C) { fipsModeSet = true } } - c.Assert(fipsModeSet, Equals, true) + c.Assert(fipsModeSet, check.Equals, true) } } } diff --git a/pkg/kube/kube_test.go b/pkg/kube/kube_test.go index 4418276726..bb5d597d79 100644 --- a/pkg/kube/kube_test.go +++ b/pkg/kube/kube_test.go @@ -17,8 +17,8 @@ package kube import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } diff --git a/pkg/kube/log_reader_test.go b/pkg/kube/log_reader_test.go index 361e89f31a..b83a89ea29 100644 --- a/pkg/kube/log_reader_test.go +++ b/pkg/kube/log_reader_test.go @@ -6,13 +6,13 @@ import ( "errors" "io" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/client-go/rest" ) type LogReaderSuite struct{} -var _ = Suite(&LogReaderSuite{}) +var _ = check.Suite(&LogReaderSuite{}) var _ io.ReadCloser = (*buffer)(nil) @@ -38,7 +38,7 @@ func (frw *fakeResponseWrapper) Stream(context.Context) (io.ReadCloser, error) { return buffer{frw.buf}, frw.err } -func (s *LogReaderSuite) TestLogReader(c *C) { +func (s *LogReaderSuite) TestLogReader(c *check.C) { err := errors.New("TEST") for _, tc := range []struct { rw *fakeResponseWrapper @@ -80,7 +80,7 @@ func (s *LogReaderSuite) TestLogReader(c *C) { } { lr := newLogReader(tc.rw) out, err := io.ReadAll(lr) - c.Assert(err, Equals, tc.err) - c.Assert(string(out), Equals, tc.out) + c.Assert(err, check.Equals, tc.err) + c.Assert(string(out), check.Equals, tc.out) } } diff --git a/pkg/kube/log_tail_test.go b/pkg/kube/log_tail_test.go index 94e88fbf27..f59e222946 100644 --- a/pkg/kube/log_tail_test.go +++ b/pkg/kube/log_tail_test.go @@ -18,14 +18,14 @@ package kube import ( - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) type LogTailTestSuite struct{} -var _ = Suite(&LogTailTestSuite{}) +var _ = check.Suite(&LogTailTestSuite{}) -func (s *LogTailTestSuite) TestLogsTail(c *C) { +func (s *LogTailTestSuite) TestLogsTail(c *check.C) { for caseIdx, tc := range []struct { limit int input []string @@ -36,37 +36,37 @@ func (s *LogTailTestSuite) TestLogsTail(c *C) { {5, []string{"line 1", "line 2"}, "line 1\r\nline 2"}, {1, []string{"line 1", "line 2"}, "line 2"}, } { - fc := Commentf("Failed for case #%v. Log: %s", caseIdx, tc.expected) + fc := check.Commentf("Failed for case #%v. Log: %s", caseIdx, tc.expected) lt := NewLogTail(tc.limit) for _, in := range tc.input { w, e := lt.Write([]byte(in)) - c.Check(e, IsNil) - c.Check(w, Equals, len([]byte(in))) + c.Check(e, check.IsNil) + c.Check(w, check.Equals, len([]byte(in))) } r := lt.ToString() - c.Check(r, Equals, tc.expected, fc) + c.Check(r, check.Equals, tc.expected, fc) } lt := NewLogTail(3) - c.Check(lt.ToString(), Equals, "") // If there were no writes at all, output should be empty line + c.Check(lt.ToString(), check.Equals, "") // If there were no writes at all, output should be empty line _, err := lt.Write([]byte("line1")) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = lt.Write([]byte("line2")) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Check(lt.ToString(), Equals, "line1\r\nline2") - c.Check(lt.ToString(), Equals, "line1\r\nline2") // Second invocation should get the same result + c.Check(lt.ToString(), check.Equals, "line1\r\nline2") + c.Check(lt.ToString(), check.Equals, "line1\r\nline2") // Second invocation should get the same result // Check that buffer is still working after ToString is called _, err = lt.Write([]byte("line3")) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Check(lt.ToString(), Equals, "line1\r\nline2\r\nline3") + c.Check(lt.ToString(), check.Equals, "line1\r\nline2\r\nline3") _, err = lt.Write([]byte("line4")) - c.Assert(err, IsNil) - c.Check(lt.ToString(), Equals, "line2\r\nline3\r\nline4") + c.Assert(err, check.IsNil) + c.Check(lt.ToString(), check.Equals, "line2\r\nline3\r\nline4") } diff --git a/pkg/kube/pod_command_executor_test.go b/pkg/kube/pod_command_executor_test.go index 332eab2f6b..bc037dad47 100644 --- a/pkg/kube/pod_command_executor_test.go +++ b/pkg/kube/pod_command_executor_test.go @@ -22,13 +22,13 @@ import ( "sync" "time" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/client-go/kubernetes/fake" ) type PodCommandExecutorTestSuite struct{} -var _ = Suite(&PodCommandExecutorTestSuite{}) +var _ = check.Suite(&PodCommandExecutorTestSuite{}) const ( podCommandExecutorNS = "pod-runner-test" @@ -36,9 +36,9 @@ const ( podCommandExecutorContainerName = "test-container" ) -func (s *PodCommandExecutorTestSuite) SetUpSuite(c *C) { +func (s *PodCommandExecutorTestSuite) SetUpSuite(c *check.C) { err := os.Setenv("POD_NAMESPACE", podCommandExecutorNS) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } // testBarrier supports race-free synchronization between a controller and a background goroutine. @@ -96,7 +96,7 @@ func (fprp *fakePodCommandExecutorProcessor) ExecWithOptions(ctx context.Context return fprp.execWithOptionsErr } -func (s *PodCommandExecutorTestSuite) TestPodRunnerExec(c *C) { +func (s *PodCommandExecutorTestSuite) TestPodRunnerExec(c *check.C) { ctx := context.Background() cli := fake.NewSimpleClientset() @@ -128,8 +128,8 @@ func (s *PodCommandExecutorTestSuite) TestPodRunnerExec(c *C) { // allow the background goroutine to terminate (no-op if not Setup) prp.execWithOptionsSyncEnd.Sync() - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, context.DeadlineExceeded), Equals, true) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, context.DeadlineExceeded), check.Equals, true) }, "Cancelled": func(ctx context.Context, pr PodCommandExecutor, prp *fakePodCommandExecutorProcessor) { var err error @@ -150,8 +150,8 @@ func (s *PodCommandExecutorTestSuite) TestPodRunnerExec(c *C) { wg.Wait() prp.execWithOptionsSyncEnd.Sync() // Release ExecWithOptions - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, context.Canceled), Equals, true) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, context.Canceled), check.Equals, true) }, "Successful execution": func(ctx context.Context, pr PodCommandExecutor, prp *fakePodCommandExecutorProcessor) { var err error @@ -171,18 +171,18 @@ func (s *PodCommandExecutorTestSuite) TestPodRunnerExec(c *C) { wg.Wait() prp.execWithOptionsSyncEnd.Sync() // Release ExecWithOptions - c.Assert(err, IsNil) - c.Assert(prp.inExecWithOptionsOpts.Command, DeepEquals, command) - c.Assert(prp.inExecWithOptionsOpts.Namespace, Equals, podCommandExecutorNS) - c.Assert(prp.inExecWithOptionsOpts.PodName, Equals, podCommandExecutorPodName) - c.Assert(prp.inExecWithOptionsOpts.ContainerName, Equals, podCommandExecutorContainerName) - c.Assert(prp.inExecWithOptionsOpts.Stdin, Equals, &bStdin) - c.Assert(prp.inExecWithOptionsOpts.Stdout, Not(IsNil)) - c.Assert(prp.inExecWithOptionsOpts.Stderr, Not(IsNil)) - c.Assert(bStdout.Len() > 0, Equals, true) - c.Assert(bStderr.Len() > 0, Equals, true) - c.Assert(bStdout.String(), Equals, expStdout) - c.Assert(bStderr.String(), Equals, expStderr) + c.Assert(err, check.IsNil) + c.Assert(prp.inExecWithOptionsOpts.Command, check.DeepEquals, command) + c.Assert(prp.inExecWithOptionsOpts.Namespace, check.Equals, podCommandExecutorNS) + c.Assert(prp.inExecWithOptionsOpts.PodName, check.Equals, podCommandExecutorPodName) + c.Assert(prp.inExecWithOptionsOpts.ContainerName, check.Equals, podCommandExecutorContainerName) + c.Assert(prp.inExecWithOptionsOpts.Stdin, check.Equals, &bStdin) + c.Assert(prp.inExecWithOptionsOpts.Stdout, check.Not(check.IsNil)) + c.Assert(prp.inExecWithOptionsOpts.Stderr, check.Not(check.IsNil)) + c.Assert(bStdout.Len() > 0, check.Equals, true) + c.Assert(bStderr.Len() > 0, check.Equals, true) + 
c.Assert(bStdout.String(), check.Equals, expStdout) + c.Assert(bStderr.String(), check.Equals, expStderr) }, } diff --git a/pkg/kube/pod_controller_test.go b/pkg/kube/pod_controller_test.go index 734889b2bc..b2172ca21c 100644 --- a/pkg/kube/pod_controller_test.go +++ b/pkg/kube/pod_controller_test.go @@ -22,7 +22,7 @@ import ( "time" "github.com/kanisterio/errkit" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" @@ -30,19 +30,19 @@ import ( type PodControllerTestSuite struct{} -var _ = Suite(&PodControllerTestSuite{}) +var _ = check.Suite(&PodControllerTestSuite{}) const ( podControllerNS = "pod-runner-test" podControllerPodName = "test-pod" ) -func (s *PodControllerTestSuite) SetUpSuite(c *C) { +func (s *PodControllerTestSuite) SetUpSuite(c *check.C) { err := os.Setenv("POD_NAMESPACE", podControllerNS) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *PodControllerTestSuite) TestPodControllerStartPod(c *C) { +func (s *PodControllerTestSuite) TestPodControllerStartPod(c *check.C) { ctx := context.Background() cli := fake.NewSimpleClientset() @@ -52,9 +52,9 @@ func (s *PodControllerTestSuite) TestPodControllerStartPod(c *C) { "Pod creation failure": func(pcp *FakePodControllerProcessor, pc PodController) { pcp.CreatePodErr = simulatedError err := pc.StartPod(ctx) - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, simulatedError), Equals, true) - c.Assert(pcp.InCreatePodOptions, DeepEquals, &PodOptions{ + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, simulatedError), check.Equals, true) + c.Assert(pcp.InCreatePodOptions, check.DeepEquals, &PodOptions{ Namespace: podControllerNS, Name: podControllerPodName, }) @@ -66,8 +66,8 @@ func (s *PodControllerTestSuite) TestPodControllerStartPod(c *C) { }, } err := pr.StartPod(ctx) - c.Assert(err, IsNil) - c.Assert(pr.PodName(), Equals, podControllerPodName) + c.Assert(err, check.IsNil) + c.Assert(pr.PodName(), check.Equals, podControllerPodName) }, "Pod already created": func(prp *FakePodControllerProcessor, pr PodController) { prp.CreatePodRet = &corev1.Pod{ @@ -77,16 +77,16 @@ func (s *PodControllerTestSuite) TestPodControllerStartPod(c *C) { } err := pr.StartPod(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) prp.InCreatePodOptions = nil prp.CreatePodRet = nil prp.CreatePodErr = errors.New("CreatePod should not be invoked") err = pr.StartPod(ctx) - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, ErrPodControllerPodAlreadyStarted), Equals, true) - c.Assert(prp.InCreatePodOptions, IsNil) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, ErrPodControllerPodAlreadyStarted), check.Equals, true) + c.Assert(prp.InCreatePodOptions, check.IsNil) }, } @@ -104,7 +104,7 @@ func (s *PodControllerTestSuite) TestPodControllerStartPod(c *C) { } } -func (s *PodControllerTestSuite) TestPodControllerWaitPod(c *C) { +func (s *PodControllerTestSuite) TestPodControllerWaitPod(c *check.C) { ctx := context.Background() cli := fake.NewSimpleClientset() @@ -113,9 +113,9 @@ func (s *PodControllerTestSuite) TestPodControllerWaitPod(c *C) { cases := map[string]func(pcp *FakePodControllerProcessor, pc PodController){ "Waiting failed because pod not started yet": func(pcp *FakePodControllerProcessor, pc PodController) { err := pc.WaitForPodReady(ctx) - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, ErrPodControllerPodNotStarted), Equals, true) - c.Assert(pcp.InCreatePodOptions, IsNil) + c.Assert(err, 
check.Not(check.IsNil)) + c.Assert(errors.Is(err, ErrPodControllerPodNotStarted), check.Equals, true) + c.Assert(pcp.InCreatePodOptions, check.IsNil) }, "Waiting failed due to timeout": func(pcp *FakePodControllerProcessor, pc PodController) { pcp.CreatePodRet = &corev1.Pod{ @@ -125,16 +125,16 @@ func (s *PodControllerTestSuite) TestPodControllerWaitPod(c *C) { }, } err := pc.StartPod(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pcp.WaitForPodReadyErr = simulatedError err = pc.WaitForPodReady(ctx) - c.Assert(err, Not(IsNil)) - c.Assert(pcp.InWaitForPodReadyPodName, Equals, podControllerPodName) - c.Assert(pcp.InWaitForPodReadyNamespace, Equals, podControllerNS) - c.Assert(errors.Is(err, pcp.WaitForPodReadyErr), Equals, true) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(pcp.InWaitForPodReadyPodName, check.Equals, podControllerPodName) + c.Assert(pcp.InWaitForPodReadyNamespace, check.Equals, podControllerNS) + c.Assert(errors.Is(err, pcp.WaitForPodReadyErr), check.Equals, true) - c.Assert(err.Error(), Equals, fmt.Sprintf("Pod failed to become ready in time: %s", simulatedError.Error())) + c.Assert(err.Error(), check.Equals, fmt.Sprintf("Pod failed to become ready in time: %s", simulatedError.Error())) // Check that POD deletion was also invoked with expected arguments }, "Waiting succeeded": func(pcp *FakePodControllerProcessor, pc PodController) { @@ -144,9 +144,9 @@ func (s *PodControllerTestSuite) TestPodControllerWaitPod(c *C) { }, } err := pc.StartPod(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = pc.WaitForPodReady(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }, } @@ -164,7 +164,7 @@ func (s *PodControllerTestSuite) TestPodControllerWaitPod(c *C) { } } -func (s *PodControllerTestSuite) TestPodControllerStopPod(c *C) { +func (s *PodControllerTestSuite) TestPodControllerStopPod(c *check.C) { ctx := context.Background() cli := fake.NewSimpleClientset() @@ -174,10 +174,10 @@ func (s *PodControllerTestSuite) TestPodControllerStopPod(c *C) { cases := map[string]func(pcp *FakePodControllerProcessor, pc PodController){ "Pod not started yet": func(pcp *FakePodControllerProcessor, pc PodController) { err := pc.StopPod(ctx, 30*time.Second, int64(0)) - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, ErrPodControllerPodNotStarted), Equals, true) - c.Assert(pcp.InDeletePodPodName, Equals, untouchedStr) - c.Assert(pcp.InDeletePodNamespace, Equals, untouchedStr) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, ErrPodControllerPodNotStarted), check.Equals, true) + c.Assert(pcp.InDeletePodPodName, check.Equals, untouchedStr) + c.Assert(pcp.InDeletePodNamespace, check.Equals, untouchedStr) }, "Pod deletion error": func(pcp *FakePodControllerProcessor, pc PodController) { pcp.CreatePodRet = &corev1.Pod{ @@ -187,12 +187,12 @@ func (s *PodControllerTestSuite) TestPodControllerStopPod(c *C) { }, } err := pc.StartPod(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pcp.DeletePodErr = simulatedError err = pc.StopPod(ctx, 30*time.Second, int64(0)) - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, simulatedError), Equals, true) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, simulatedError), check.Equals, true) }, "Pod successfully deleted": func(pcp *FakePodControllerProcessor, pc PodController) { pcp.CreatePodRet = &corev1.Pod{ @@ -202,14 +202,14 @@ func (s *PodControllerTestSuite) TestPodControllerStopPod(c *C) { }, } err := pc.StartPod(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = pc.StopPod(ctx, 
30*time.Second, int64(0)) - c.Assert(err, IsNil) - c.Assert(pcp.InDeletePodPodName, Equals, podControllerPodName) - c.Assert(pcp.InDeletePodNamespace, Equals, podControllerNS) + c.Assert(err, check.IsNil) + c.Assert(pcp.InDeletePodPodName, check.Equals, podControllerPodName) + c.Assert(pcp.InDeletePodNamespace, check.Equals, podControllerNS) gracePeriodSeconds := int64(0) - c.Assert(pcp.InDeletePodOptions, DeepEquals, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds}) + c.Assert(pcp.InDeletePodOptions, check.DeepEquals, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds}) }, } @@ -230,21 +230,21 @@ func (s *PodControllerTestSuite) TestPodControllerStopPod(c *C) { } } -func (s *PodControllerTestSuite) TestPodControllerGetCommandExecutorAndFileWriter(c *C) { +func (s *PodControllerTestSuite) TestPodControllerGetCommandExecutorAndFileWriter(c *check.C) { ctx := context.Background() cli := fake.NewSimpleClientset() cases := map[string]func(pcp *FakePodControllerProcessor, pc PodController){ "Pod not started yet": func(_ *FakePodControllerProcessor, pc PodController) { pce, err := pc.GetCommandExecutor() - c.Assert(pce, IsNil) - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, ErrPodControllerPodNotStarted), Equals, true) + c.Assert(pce, check.IsNil) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, ErrPodControllerPodNotStarted), check.Equals, true) pfw, err := pc.GetFileWriter() - c.Assert(pfw, IsNil) - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, ErrPodControllerPodNotStarted), Equals, true) + c.Assert(pfw, check.IsNil) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, ErrPodControllerPodNotStarted), check.Equals, true) }, "Pod not ready yet": func(pcp *FakePodControllerProcessor, pc PodController) { pcp.CreatePodRet = &corev1.Pod{ @@ -253,17 +253,17 @@ func (s *PodControllerTestSuite) TestPodControllerGetCommandExecutorAndFileWrite }, } err := pc.StartPod(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pce, err := pc.GetCommandExecutor() - c.Assert(pce, IsNil) - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, ErrPodControllerPodNotReady), Equals, true) + c.Assert(pce, check.IsNil) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, ErrPodControllerPodNotReady), check.Equals, true) pfw, err := pc.GetFileWriter() - c.Assert(pfw, IsNil) - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, ErrPodControllerPodNotReady), Equals, true) + c.Assert(pfw, check.IsNil) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, ErrPodControllerPodNotReady), check.Equals, true) }, "CommandExecutor successfully returned": func(pcp *FakePodControllerProcessor, pc PodController) { pcp.CreatePodRet = &corev1.Pod{ @@ -277,20 +277,20 @@ func (s *PodControllerTestSuite) TestPodControllerGetCommandExecutorAndFileWrite }, } err := pc.StartPod(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = pc.WaitForPodReady(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) var epce PodCommandExecutor pce, err := pc.GetCommandExecutor() - c.Assert(err, IsNil) - c.Assert(pce, Implements, &epce) + c.Assert(err, check.IsNil) + c.Assert(pce, check.Implements, &epce) var epfw PodFileWriter pfw, err := pc.GetFileWriter() - c.Assert(err, IsNil) - c.Assert(pfw, Implements, &epfw) + c.Assert(err, check.IsNil) + c.Assert(pfw, check.Implements, &epfw) }, } diff --git a/pkg/kube/pod_file_writer_test.go b/pkg/kube/pod_file_writer_test.go index 2a349066ed..c9c45636b1 100644 --- a/pkg/kube/pod_file_writer_test.go +++ 
b/pkg/kube/pod_file_writer_test.go @@ -22,13 +22,13 @@ import ( "os" "github.com/kanisterio/errkit" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/client-go/kubernetes/fake" ) type PodFileWriterTestSuite struct{} -var _ = Suite(&PodFileWriterTestSuite{}) +var _ = check.Suite(&PodFileWriterTestSuite{}) const ( podFileWriterNS = "pod-runner-test" @@ -36,9 +36,9 @@ const ( podFileWriterContainerName = "test-container" ) -func (s *PodFileWriterTestSuite) SetUpSuite(c *C) { +func (s *PodFileWriterTestSuite) SetUpSuite(c *check.C) { err := os.Setenv("POD_NAMESPACE", podFileWriterNS) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } type fakePodFileWriterProcessor struct { @@ -76,7 +76,7 @@ func (w *fakePodWriter) Remove(ctx context.Context, namespace, podName, containe var _ PodWriter = (*fakePodWriter)(nil) -func (s *PodFileWriterTestSuite) TestPodRunnerWriteFile(c *C) { +func (s *PodFileWriterTestSuite) TestPodRunnerWriteFile(c *check.C) { ctx := context.Background() cli := fake.NewSimpleClientset() @@ -89,31 +89,31 @@ func (s *PodFileWriterTestSuite) TestPodRunnerWriteFile(c *C) { buf := bytes.NewBuffer([]byte("some file content")) remover, err := pfw.Write(ctx, "/path/to/file", buf) - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, simulatedError), Equals, true) - c.Assert(remover, IsNil) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, simulatedError), check.Equals, true) + c.Assert(remover, check.IsNil) - c.Assert(pfwp.podWriter.inWriteNamespace, Equals, podFileWriterNS) - c.Assert(pfwp.podWriter.inWritePodName, Equals, podFileWriterPodName) - c.Assert(pfwp.podWriter.inWriteContainerName, Equals, podFileWriterContainerName) + c.Assert(pfwp.podWriter.inWriteNamespace, check.Equals, podFileWriterNS) + c.Assert(pfwp.podWriter.inWritePodName, check.Equals, podFileWriterPodName) + c.Assert(pfwp.podWriter.inWriteContainerName, check.Equals, podFileWriterContainerName) }, "Write to pod succeeded": func(pfwp *fakePodFileWriterProcessor, pfw PodFileWriter) { pfwp.podWriter = &fakePodWriter{} buf := bytes.NewBuffer([]byte("some file content")) remover, err := pfw.Write(ctx, "/path/to/file", buf) - c.Assert(err, IsNil) - c.Assert(remover, Not(IsNil)) + c.Assert(err, check.IsNil) + c.Assert(remover, check.Not(check.IsNil)) - c.Assert(pfwp.podWriter.inWriteNamespace, Equals, podFileWriterNS) - c.Assert(pfwp.podWriter.inWritePodName, Equals, podFileWriterPodName) - c.Assert(pfwp.podWriter.inWriteContainerName, Equals, podFileWriterContainerName) + c.Assert(pfwp.podWriter.inWriteNamespace, check.Equals, podFileWriterNS) + c.Assert(pfwp.podWriter.inWritePodName, check.Equals, podFileWriterPodName) + c.Assert(pfwp.podWriter.inWriteContainerName, check.Equals, podFileWriterContainerName) err = remover.Remove(ctx) - c.Assert(err, IsNil) - c.Assert(pfwp.podWriter.inRemoveNamespace, Equals, podFileWriterNS) - c.Assert(pfwp.podWriter.inRemovePodName, Equals, podFileWriterPodName) - c.Assert(pfwp.podWriter.inRemoveContainerName, Equals, podFileWriterContainerName) + c.Assert(err, check.IsNil) + c.Assert(pfwp.podWriter.inRemoveNamespace, check.Equals, podFileWriterNS) + c.Assert(pfwp.podWriter.inRemovePodName, check.Equals, podFileWriterPodName) + c.Assert(pfwp.podWriter.inRemoveContainerName, check.Equals, podFileWriterContainerName) }, "Write to pod succeeded but remove failed": func(pfwp *fakePodFileWriterProcessor, pfw PodFileWriter) { pfwp.podWriter = &fakePodWriter{} @@ -121,19 +121,19 @@ func (s *PodFileWriterTestSuite) TestPodRunnerWriteFile(c *C) { buf := 
bytes.NewBuffer([]byte("some file content")) remover, err := pfw.Write(ctx, "/path/to/file", buf) - c.Assert(err, IsNil) - c.Assert(remover, Not(IsNil)) + c.Assert(err, check.IsNil) + c.Assert(remover, check.Not(check.IsNil)) - c.Assert(pfwp.podWriter.inWriteNamespace, Equals, podFileWriterNS) - c.Assert(pfwp.podWriter.inWritePodName, Equals, podFileWriterPodName) - c.Assert(pfwp.podWriter.inWriteContainerName, Equals, podFileWriterContainerName) + c.Assert(pfwp.podWriter.inWriteNamespace, check.Equals, podFileWriterNS) + c.Assert(pfwp.podWriter.inWritePodName, check.Equals, podFileWriterPodName) + c.Assert(pfwp.podWriter.inWriteContainerName, check.Equals, podFileWriterContainerName) err = remover.Remove(ctx) - c.Assert(err, Not(IsNil)) - c.Assert(errors.Is(err, simulatedError), Equals, true) - c.Assert(pfwp.podWriter.inRemoveNamespace, Equals, podFileWriterNS) - c.Assert(pfwp.podWriter.inRemovePodName, Equals, podFileWriterPodName) - c.Assert(pfwp.podWriter.inRemoveContainerName, Equals, podFileWriterContainerName) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(errors.Is(err, simulatedError), check.Equals, true) + c.Assert(pfwp.podWriter.inRemoveNamespace, check.Equals, podFileWriterNS) + c.Assert(pfwp.podWriter.inRemovePodName, check.Equals, podFileWriterPodName) + c.Assert(pfwp.podWriter.inRemoveContainerName, check.Equals, podFileWriterContainerName) }, } diff --git a/pkg/kube/pod_runner_test.go b/pkg/kube/pod_runner_test.go index 3cf049d741..a3b8c21d61 100644 --- a/pkg/kube/pod_runner_test.go +++ b/pkg/kube/pod_runner_test.go @@ -20,7 +20,7 @@ import ( "path" "github.com/pkg/errors" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" @@ -32,19 +32,19 @@ import ( type PodRunnerTestSuite struct{} -var _ = Suite(&PodRunnerTestSuite{}) +var _ = check.Suite(&PodRunnerTestSuite{}) const ( podRunnerNS = "pod-runner-test" podName = "test-pod" ) -func (s *PodRunnerTestSuite) SetUpSuite(c *C) { +func (s *PodRunnerTestSuite) SetUpSuite(c *check.C) { err := os.Setenv("POD_NAMESPACE", podRunnerNS) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *PodRunnerTestSuite) TestPodRunnerContextCanceled(c *C) { +func (s *PodRunnerTestSuite) TestPodRunnerContextCanceled(c *check.C) { ctx, cancel := context.WithCancel(context.Background()) cli := fake.NewSimpleClientset() cli.PrependReactor("create", "pods", func(action testing.Action) (handled bool, ret runtime.Object, err error) { @@ -71,7 +71,7 @@ func (s *PodRunnerTestSuite) TestPodRunnerContextCanceled(c *C) { returned := make(chan struct{}) go func() { _, err := pr.Run(ctx, makePodRunnerTestFunc(deleted)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) close(returned) }() cancel() @@ -79,7 +79,7 @@ func (s *PodRunnerTestSuite) TestPodRunnerContextCanceled(c *C) { <-returned } -func (s *PodRunnerTestSuite) TestPodRunnerForSuccessCase(c *C) { +func (s *PodRunnerTestSuite) TestPodRunnerForSuccessCase(c *check.C) { ctx, cancel := context.WithCancel(context.Background()) cli := fake.NewSimpleClientset() cli.PrependReactor("create", "pods", func(action testing.Action) (handled bool, ret runtime.Object, err error) { @@ -107,7 +107,7 @@ func (s *PodRunnerTestSuite) TestPodRunnerForSuccessCase(c *C) { returned := make(chan struct{}) go func() { _, err := pr.Run(ctx, makePodRunnerTestFunc(deleted)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) close(returned) }() deleted <- struct{}{} @@ -117,7 +117,7 @@ func (s *PodRunnerTestSuite) 
TestPodRunnerForSuccessCase(c *C) { // TestPodRunnerWithDebugLabelForSuccessCase adds a debug entry into the context and verifies the // pod got created with corresponding label using the entry or not. -func (s *PodRunnerTestSuite) TestPodRunnerWithDebugLabelForSuccessCase(c *C) { +func (s *PodRunnerTestSuite) TestPodRunnerWithDebugLabelForSuccessCase(c *check.C) { jobIDSuffix := consts.LabelSuffixJobID for _, tc := range []struct { name string @@ -174,7 +174,7 @@ func (s *PodRunnerTestSuite) TestPodRunnerWithDebugLabelForSuccessCase(c *C) { errorCh <- err }() deleted <- struct{}{} - c.Assert(<-errorCh, IsNil) + c.Assert(<-errorCh, check.IsNil) cancel() } } diff --git a/pkg/kube/pod_test.go b/pkg/kube/pod_test.go index 3033f03a20..9b6959e17d 100644 --- a/pkg/kube/pod_test.go +++ b/pkg/kube/pod_test.go @@ -25,7 +25,7 @@ import ( "strings" "time" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,49 +48,49 @@ const ( controllerSA = "controller-sa" ) -var _ = Suite(&PodSuite{}) +var _ = check.Suite(&PodSuite{}) -func (s *PodSuite) SetUpSuite(c *C) { +func (s *PodSuite) SetUpSuite(c *check.C) { var err error s.cli, err = NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "podtest-", }, } ns, err = s.cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = ns.Name err = os.Setenv("POD_NAMESPACE", ns.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = os.Setenv("POD_SERVICE_ACCOUNT", controllerSA) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.createServiceAccount(testSAName, s.namespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.createServiceAccount(controllerSA, s.namespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *PodSuite) TearDownSuite(c *C) { +func (s *PodSuite) TearDownSuite(c *check.C) { if s.namespace != "" { err := s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } -func (s *PodSuite) TestPod(c *C) { +func (s *PodSuite) TestPod(c *check.C) { // get controllers's namespace cns, err := GetControllerNamespace() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // get controller's SA sa, err := GetControllerServiceAccount(fake.NewSimpleClientset()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) testSec := s.createTestSecret(c) defer func() { @@ -203,7 +203,7 @@ func (s *PodSuite) TestPod(c *C) { // we have not specified the SA, if the pod is being created in the // same ns as controller's, controller's SA should have been set. 
if po.ServiceAccountName == "" && po.Namespace == cns { - c.Assert(pod.Spec.ServiceAccountName, Equals, sa) + c.Assert(pod.Spec.ServiceAccountName, check.Equals, sa) } else { var expectedSA string if po.ServiceAccountName == "" { @@ -211,58 +211,58 @@ func (s *PodSuite) TestPod(c *C) { } else { expectedSA = po.ServiceAccountName } - c.Assert(pod.Spec.ServiceAccountName, Equals, expectedSA) + c.Assert(pod.Spec.ServiceAccountName, check.Equals, expectedSA) } if po.Annotations != nil { - c.Check(pod.ObjectMeta.Annotations, NotNil) - c.Check(pod.ObjectMeta.Annotations, DeepEquals, po.Annotations) + c.Check(pod.ObjectMeta.Annotations, check.NotNil) + c.Check(pod.ObjectMeta.Annotations, check.DeepEquals, po.Annotations) } if po.Name != "" { - c.Assert(pod.ObjectMeta.Name, Equals, po.Name) + c.Assert(pod.ObjectMeta.Name, check.Equals, po.Name) } - c.Check(len(pod.ObjectMeta.Labels), Equals, len(po.Labels)+1) - c.Check(pod.ObjectMeta.Labels[consts.LabelKeyCreatedBy], Equals, consts.LabelValueKanister) + c.Check(len(pod.ObjectMeta.Labels), check.Equals, len(po.Labels)+1) + c.Check(pod.ObjectMeta.Labels[consts.LabelKeyCreatedBy], check.Equals, consts.LabelValueKanister) for key, value := range po.Labels { - c.Check(pod.ObjectMeta.Labels[key], Equals, value) + c.Check(pod.ObjectMeta.Labels[key], check.Equals, value) } if po.Resources.Limits != nil { - c.Assert(pod.Spec.Containers[0].Resources.Limits, NotNil) - c.Assert(pod.Spec.Containers[0].Resources.Limits, DeepEquals, po.Resources.Limits) + c.Assert(pod.Spec.Containers[0].Resources.Limits, check.NotNil) + c.Assert(pod.Spec.Containers[0].Resources.Limits, check.DeepEquals, po.Resources.Limits) } if po.Resources.Requests != nil { - c.Assert(pod.Spec.Containers[0].Resources.Requests, NotNil) - c.Assert(pod.Spec.Containers[0].Resources.Requests, DeepEquals, po.Resources.Requests) + c.Assert(pod.Spec.Containers[0].Resources.Requests, check.NotNil) + c.Assert(pod.Spec.Containers[0].Resources.Requests, check.DeepEquals, po.Resources.Requests) } switch { case po.ContainerName != "": - c.Assert(pod.Spec.Containers[0].Name, Equals, po.ContainerName) + c.Assert(pod.Spec.Containers[0].Name, check.Equals, po.ContainerName) default: - c.Assert(pod.Spec.Containers[0].Name, Equals, DefaultContainerName) + c.Assert(pod.Spec.Containers[0].Name, check.Equals, DefaultContainerName) } switch { case po.RestartPolicy == "": - c.Assert(pod.Spec.RestartPolicy, Equals, corev1.RestartPolicyNever) + c.Assert(pod.Spec.RestartPolicy, check.Equals, corev1.RestartPolicyNever) default: - c.Assert(pod.Spec.RestartPolicy, Equals, po.RestartPolicy) + c.Assert(pod.Spec.RestartPolicy, check.Equals, po.RestartPolicy) } if po.EnvironmentVariables != nil && len(po.EnvironmentVariables) > 0 { - c.Assert(pod.Spec.Containers[0].Env, DeepEquals, po.EnvironmentVariables) + c.Assert(pod.Spec.Containers[0].Env, check.DeepEquals, po.EnvironmentVariables) } - c.Assert(err, IsNil) - c.Assert(WaitForPodReady(ctx, s.cli, po.Namespace, pod.Name), IsNil) - c.Assert(DeletePod(context.Background(), s.cli, pod), IsNil) + c.Assert(err, check.IsNil) + c.Assert(WaitForPodReady(ctx, s.cli, po.Namespace, pod.Name), check.IsNil) + c.Assert(DeletePod(context.Background(), s.cli, pod), check.IsNil) } } -func (s *PodSuite) createTestSecret(c *C) *corev1.Secret { +func (s *PodSuite) createTestSecret(c *check.C) *corev1.Secret { testSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-secret-", @@ -272,7 +272,7 @@ func (s *PodSuite) createTestSecret(c *C) *corev1.Secret { }, } testSecret, 
err := s.cli.CoreV1().Secrets(s.namespace).Create(context.Background(), testSecret, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) return testSecret } @@ -289,7 +289,7 @@ func (s *PodSuite) createServiceAccount(name, ns string) error { return nil } -func (s *PodSuite) TestPodWithFilesystemModeVolumes(c *C) { +func (s *PodSuite) TestPodWithFilesystemModeVolumes(c *check.C) { cli := fake.NewSimpleClientset() pvcName := "prometheus-ibm-monitoring-prometheus-db-prometheus-ibm-monitoring-prometheus-0" pvc := &corev1.PersistentVolumeClaim{ @@ -306,7 +306,7 @@ func (s *PodSuite) TestPodWithFilesystemModeVolumes(c *C) { }, } pvc, err := cli.CoreV1().PersistentVolumeClaims(s.namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) vols := map[string]VolumeMountOptions{pvc.Name: {MountPath: "/mnt/data1", ReadOnly: PVCContainsReadOnlyAccessMode(pvc)}} ctx := context.Background() var p *corev1.Pod @@ -330,15 +330,15 @@ func (s *PodSuite) TestPodWithFilesystemModeVolumes(c *C) { Command: []string{"sh", "-c", "tail -f /dev/null"}, Volumes: vols, }) - c.Assert(err, IsNil) - c.Assert(WaitForPodReady(ctx, cli, s.namespace, pod.Name), IsNil) - c.Assert(pod.Spec.Volumes, HasLen, 1) - c.Assert(pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName, Equals, pvcName) - c.Assert(pod.Spec.Containers[0].VolumeMounts[0].MountPath, Equals, "/mnt/data1") - c.Assert(len(pod.Spec.Containers[0].VolumeDevices), Equals, 0) + c.Assert(err, check.IsNil) + c.Assert(WaitForPodReady(ctx, cli, s.namespace, pod.Name), check.IsNil) + c.Assert(pod.Spec.Volumes, check.HasLen, 1) + c.Assert(pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName, check.Equals, pvcName) + c.Assert(pod.Spec.Containers[0].VolumeMounts[0].MountPath, check.Equals, "/mnt/data1") + c.Assert(len(pod.Spec.Containers[0].VolumeDevices), check.Equals, 0) } -func (s *PodSuite) TestPodWithFilesystemModeReadOnlyVolumes(c *C) { +func (s *PodSuite) TestPodWithFilesystemModeReadOnlyVolumes(c *check.C) { cli := fake.NewSimpleClientset() pvcName := "pvc-with-read-only-mount" pvc := &corev1.PersistentVolumeClaim{ @@ -355,7 +355,7 @@ func (s *PodSuite) TestPodWithFilesystemModeReadOnlyVolumes(c *C) { }, } pvc, err := cli.CoreV1().PersistentVolumeClaims(s.namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) vols := map[string]VolumeMountOptions{pvc.Name: {MountPath: "/mnt/data1", ReadOnly: PVCContainsReadOnlyAccessMode(pvc)}} ctx := context.Background() var p *corev1.Pod @@ -379,15 +379,15 @@ func (s *PodSuite) TestPodWithFilesystemModeReadOnlyVolumes(c *C) { Command: []string{"sh", "-c", "tail -f /dev/null"}, Volumes: vols, }) - c.Assert(err, IsNil) - c.Assert(WaitForPodReady(ctx, cli, s.namespace, pod.Name), IsNil) - c.Assert(pod.Spec.Volumes, HasLen, 1) - c.Assert(pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName, Equals, pvcName) - c.Assert(pod.Spec.Containers[0].VolumeMounts[0].MountPath, Equals, "/mnt/data1") - c.Assert(len(pod.Spec.Containers[0].VolumeDevices), Equals, 0) + c.Assert(err, check.IsNil) + c.Assert(WaitForPodReady(ctx, cli, s.namespace, pod.Name), check.IsNil) + c.Assert(pod.Spec.Volumes, check.HasLen, 1) + c.Assert(pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName, check.Equals, pvcName) + c.Assert(pod.Spec.Containers[0].VolumeMounts[0].MountPath, check.Equals, "/mnt/data1") + c.Assert(len(pod.Spec.Containers[0].VolumeDevices), check.Equals, 0) } -func (s 
*PodSuite) TestPodWithBlockModeVolumes(c *C) { +func (s *PodSuite) TestPodWithBlockModeVolumes(c *check.C) { cli := fake.NewSimpleClientset() pvcName := "block-mode-volume" blockMode := corev1.PersistentVolumeBlock @@ -406,7 +406,7 @@ func (s *PodSuite) TestPodWithBlockModeVolumes(c *C) { }, } pvc, err := cli.CoreV1().PersistentVolumeClaims(s.namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) vols := map[string]string{pvc.Name: "/mnt/data1"} ctx := context.Background() var p *corev1.Pod @@ -430,15 +430,15 @@ func (s *PodSuite) TestPodWithBlockModeVolumes(c *C) { Command: []string{"sh", "-c", "tail -f /dev/null"}, BlockVolumes: vols, }) - c.Assert(err, IsNil) - c.Assert(WaitForPodReady(ctx, cli, s.namespace, pod.Name), IsNil) - c.Assert(pod.Spec.Volumes, HasLen, 1) - c.Assert(pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName, Equals, pvcName) - c.Assert(len(pod.Spec.Containers[0].VolumeMounts), Equals, 0) - c.Assert(pod.Spec.Containers[0].VolumeDevices[0].DevicePath, Equals, "/mnt/data1") + c.Assert(err, check.IsNil) + c.Assert(WaitForPodReady(ctx, cli, s.namespace, pod.Name), check.IsNil) + c.Assert(pod.Spec.Volumes, check.HasLen, 1) + c.Assert(pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName, check.Equals, pvcName) + c.Assert(len(pod.Spec.Containers[0].VolumeMounts), check.Equals, 0) + c.Assert(pod.Spec.Containers[0].VolumeDevices[0].DevicePath, check.Equals, "/mnt/data1") } -func (s *PodSuite) TestGetPodLogs(c *C) { +func (s *PodSuite) TestGetPodLogs(c *check.C) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() pod, err := CreatePod(context.Background(), s.cli, &PodOptions{ @@ -454,20 +454,20 @@ func (s *PodSuite) TestGetPodLogs(c *C) { }}, }, }) - c.Assert(err, IsNil) - c.Assert(WaitForPodCompletion(ctx, s.cli, s.namespace, pod.Name), IsNil) + c.Assert(err, check.IsNil) + c.Assert(WaitForPodCompletion(ctx, s.cli, s.namespace, pod.Name), check.IsNil) logs, err := GetPodLogs(ctx, s.cli, s.namespace, pod.Name, pod.Spec.Containers[0].Name) - c.Assert(err, IsNil) - c.Assert(strings.Contains(logs, "hello"), Equals, true) - c.Assert(DeletePod(context.Background(), s.cli, pod), IsNil) + c.Assert(err, check.IsNil) + c.Assert(strings.Contains(logs, "hello"), check.Equals, true) + c.Assert(DeletePod(context.Background(), s.cli, pod), check.IsNil) } -func (s *PodSuite) TestPatchDefaultPodSpecs(c *C) { +func (s *PodSuite) TestPatchDefaultPodSpecs(c *check.C) { defaultSpecs := corev1.PodSpec{ Containers: []corev1.Container{ { Name: "container", - Image: "ghcr.io/kanisterio/kanister-tools:0.110.0", + Image: "ghcr.io/kanisterio/kanister-tools:0.111.0", Command: []string{"sh", "-c", "echo in default specs"}, ImagePullPolicy: corev1.PullPolicy(corev1.PullIfNotPresent), VolumeMounts: []corev1.VolumeMount{ @@ -513,7 +513,7 @@ func (s *PodSuite) TestPatchDefaultPodSpecs(c *C) { Containers: []corev1.Container{ { Name: "container", - Image: "ghcr.io/kanisterio/kanister-tools:0.110.0", + Image: "ghcr.io/kanisterio/kanister-tools:0.111.0", Command: []string{"sh", "-c", "echo in default specs"}, ImagePullPolicy: corev1.PullPolicy(corev1.PullIfNotPresent), VolumeMounts: []corev1.VolumeMount{ @@ -553,7 +553,7 @@ func (s *PodSuite) TestPatchDefaultPodSpecs(c *C) { Containers: []corev1.Container{ { Name: "container", - Image: "ghcr.io/kanisterio/kanister-tools:0.110.0", + Image: "ghcr.io/kanisterio/kanister-tools:0.111.0", Command: []string{"sh", "-c", "echo in default specs"}, 
ImagePullPolicy: corev1.PullPolicy(corev1.PullIfNotPresent), VolumeMounts: []corev1.VolumeMount{ @@ -607,7 +607,7 @@ func (s *PodSuite) TestPatchDefaultPodSpecs(c *C) { Containers: []corev1.Container{ { Name: "container", - Image: "ghcr.io/kanisterio/kanister-tools:0.110.0", + Image: "ghcr.io/kanisterio/kanister-tools:0.111.0", Command: []string{"sh", "-c", "echo in default specs"}, ImagePullPolicy: corev1.PullPolicy(corev1.PullIfNotPresent), VolumeMounts: []corev1.VolumeMount{ @@ -668,7 +668,7 @@ func (s *PodSuite) TestPatchDefaultPodSpecs(c *C) { Containers: []corev1.Container{ { Name: "container", - Image: "ghcr.io/kanisterio/kanister-tools:0.110.0", + Image: "ghcr.io/kanisterio/kanister-tools:0.111.0", Command: []string{"sh", "-c", "echo in default specs"}, ImagePullPolicy: corev1.PullPolicy(corev1.PullIfNotPresent), VolumeMounts: []corev1.VolumeMount{ @@ -731,7 +731,7 @@ func (s *PodSuite) TestPatchDefaultPodSpecs(c *C) { Containers: []corev1.Container{ { Name: "container", - Image: "ghcr.io/kanisterio/kanister-tools:0.110.0", + Image: "ghcr.io/kanisterio/kanister-tools:0.111.0", Command: []string{"echo", "override command"}, ImagePullPolicy: corev1.PullPolicy(corev1.PullIfNotPresent), VolumeMounts: []corev1.VolumeMount{ @@ -771,7 +771,7 @@ func (s *PodSuite) TestPatchDefaultPodSpecs(c *C) { Containers: []corev1.Container{ { Name: "container", - Image: "ghcr.io/kanisterio/kanister-tools:0.110.0", + Image: "ghcr.io/kanisterio/kanister-tools:0.111.0", Command: []string{"echo", "override command"}, ImagePullPolicy: corev1.PullPolicy(corev1.PullIfNotPresent), VolumeMounts: []corev1.VolumeMount{ @@ -814,7 +814,7 @@ func (s *PodSuite) TestPatchDefaultPodSpecs(c *C) { Containers: []corev1.Container{ { Name: "container", - Image: "ghcr.io/kanisterio/kanister-tools:0.110.0", + Image: "ghcr.io/kanisterio/kanister-tools:0.111.0", Command: []string{"sh", "-c", "echo in default specs"}, ImagePullPolicy: corev1.PullPolicy(corev1.PullIfNotPresent), VolumeMounts: []corev1.VolumeMount{ @@ -844,25 +844,25 @@ func (s *PodSuite) TestPatchDefaultPodSpecs(c *C) { // Run tests for _, test := range tests { override, err := CreateAndMergeJSONPatch(test.BlueprintPodSpecs, test.ActionsetPodSpecs) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) podSpec, err := patchDefaultPodSpecs(defaultSpecs, override) - c.Assert(err, IsNil) - c.Assert(podSpec, DeepEquals, test.Expected) + c.Assert(err, check.IsNil) + c.Assert(podSpec, check.DeepEquals, test.Expected) } } -func (s *PodSuite) TestGetPodReadyWaitTimeout(c *C) { +func (s *PodSuite) TestGetPodReadyWaitTimeout(c *check.C) { // Setup ENV to change the default timeout err := os.Setenv(PodReadyWaitTimeoutEnv, "5") - c.Assert(err, IsNil) - c.Assert(GetPodReadyWaitTimeout(), Equals, time.Minute*5) + c.Assert(err, check.IsNil) + c.Assert(GetPodReadyWaitTimeout(), check.Equals, time.Minute*5) err = os.Unsetenv(PodReadyWaitTimeoutEnv) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check without ENV set - c.Assert(GetPodReadyWaitTimeout(), Equals, DefaultPodReadyWaitTimeout) + c.Assert(GetPodReadyWaitTimeout(), check.Equals, DefaultPodReadyWaitTimeout) } -func (s *PodSuite) TestSetPodSecurityContext(c *C) { +func (s *PodSuite) TestSetPodSecurityContext(c *check.C) { po := &PodOptions{ Namespace: s.namespace, GenerateName: "test-", @@ -876,15 +876,15 @@ func (s *PodSuite) TestSetPodSecurityContext(c *C) { } pod, err := CreatePod(context.Background(), s.cli, po) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) runAsNonRootExpected := true - 
c.Assert(pod.Spec.SecurityContext.RunAsNonRoot, DeepEquals, &runAsNonRootExpected) + c.Assert(pod.Spec.SecurityContext.RunAsNonRoot, check.DeepEquals, &runAsNonRootExpected) var uidAndGIDExpected int64 = 1000 - c.Assert(*pod.Spec.SecurityContext.RunAsUser, DeepEquals, uidAndGIDExpected) - c.Assert(*pod.Spec.SecurityContext.RunAsGroup, DeepEquals, uidAndGIDExpected) + c.Assert(*pod.Spec.SecurityContext.RunAsUser, check.DeepEquals, uidAndGIDExpected) + c.Assert(*pod.Spec.SecurityContext.RunAsGroup, check.DeepEquals, uidAndGIDExpected) } -func (s *PodSuite) TestSetPodSecurityContextOverridesPodOverride(c *C) { +func (s *PodSuite) TestSetPodSecurityContextOverridesPodOverride(c *check.C) { po := &PodOptions{ Namespace: s.namespace, GenerateName: "test-", @@ -905,15 +905,15 @@ func (s *PodSuite) TestSetPodSecurityContextOverridesPodOverride(c *C) { } pod, err := CreatePod(context.Background(), s.cli, po) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) runAsNonRootExpected := true - c.Assert(pod.Spec.SecurityContext.RunAsNonRoot, DeepEquals, &runAsNonRootExpected) + c.Assert(pod.Spec.SecurityContext.RunAsNonRoot, check.DeepEquals, &runAsNonRootExpected) var uidAndGIDExpected int64 = 1000 - c.Assert(*pod.Spec.SecurityContext.RunAsUser, DeepEquals, uidAndGIDExpected) - c.Assert(*pod.Spec.SecurityContext.RunAsGroup, DeepEquals, uidAndGIDExpected) + c.Assert(*pod.Spec.SecurityContext.RunAsUser, check.DeepEquals, uidAndGIDExpected) + c.Assert(*pod.Spec.SecurityContext.RunAsGroup, check.DeepEquals, uidAndGIDExpected) } -func (s *PodSuite) TestSetLifecycleHook(c *C) { +func (s *PodSuite) TestSetLifecycleHook(c *check.C) { lch := &corev1.Lifecycle{ PostStart: &corev1.LifecycleHandler{ Exec: &corev1.ExecAction{ @@ -931,11 +931,11 @@ func (s *PodSuite) TestSetLifecycleHook(c *C) { } pod, err := CreatePod(context.Background(), s.cli, po) - c.Assert(err, IsNil) - c.Assert(pod.Spec.Containers[0].Lifecycle, DeepEquals, lch) + c.Assert(err, check.IsNil) + c.Assert(pod.Spec.Containers[0].Lifecycle, check.DeepEquals, lch) } -func (s *PodSuite) TestGetRedactedOptions(c *C) { +func (s *PodSuite) TestGetRedactedOptions(c *check.C) { opts := &PodOptions{ Namespace: s.namespace, GenerateName: "test-", @@ -960,15 +960,15 @@ func (s *PodSuite) TestGetRedactedOptions(c *C) { po1 := getRedactedOptions(opts) - c.Assert(po1.Namespace, Equals, opts.Namespace) - c.Assert(po1.GenerateName, Equals, opts.GenerateName) - c.Assert(po1.Image, Equals, opts.Image) - c.Assert(po1.Command, DeepEquals, []string{redactedValue, redactedValue, redactedValue}) - c.Assert(po1.EnvironmentVariables, DeepEquals, []corev1.EnvVar{ + c.Assert(po1.Namespace, check.Equals, opts.Namespace) + c.Assert(po1.GenerateName, check.Equals, opts.GenerateName) + c.Assert(po1.Image, check.Equals, opts.Image) + c.Assert(po1.Command, check.DeepEquals, []string{redactedValue, redactedValue, redactedValue}) + c.Assert(po1.EnvironmentVariables, check.DeepEquals, []corev1.EnvVar{ {Name: "abc", Value: redactedValue}, {Name: "ooo", Value: redactedValue}, }) - c.Assert(po1.PodOverride, DeepEquals, crv1alpha1.JSONMap{ + c.Assert(po1.PodOverride, check.DeepEquals, crv1alpha1.JSONMap{ "containers": []corev1.Container{{ Name: "sidecar", Image: consts.LatestKanisterToolsImage, @@ -990,17 +990,17 @@ func (s *PodSuite) TestGetRedactedOptions(c *C) { }, }) - c.Assert(po2.Namespace, Equals, s.namespace) - c.Assert(po2.Image, Equals, consts.LatestKanisterToolsImage) - c.Assert(po2.Command, IsNil) - c.Assert(po2.EnvironmentVariables, IsNil) - c.Assert(po2.PodOverride, 
DeepEquals, crv1alpha1.JSONMap{ + c.Assert(po2.Namespace, check.Equals, s.namespace) + c.Assert(po2.Image, check.Equals, consts.LatestKanisterToolsImage) + c.Assert(po2.Command, check.IsNil) + c.Assert(po2.EnvironmentVariables, check.IsNil) + c.Assert(po2.PodOverride, check.DeepEquals, crv1alpha1.JSONMap{ "volumes": []corev1.Volume{{Name: "Fake volume"}}, "containers": 123, }) } -func (s *PodSuite) TestGetRedactedPod(c *C) { +func (s *PodSuite) TestGetRedactedPod(c *check.C) { pod := &corev1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Some kind", @@ -1032,9 +1032,9 @@ func (s *PodSuite) TestGetRedactedPod(c *C) { p1 := getRedactedPod(pod) - c.Assert(p1.TypeMeta, DeepEquals, pod.TypeMeta) - c.Assert(len(p1.Spec.Containers), Equals, len(pod.Spec.Containers)) - c.Assert(p1.Spec.Containers, DeepEquals, []corev1.Container{ + c.Assert(p1.TypeMeta, check.DeepEquals, pod.TypeMeta) + c.Assert(len(p1.Spec.Containers), check.Equals, len(pod.Spec.Containers)) + c.Assert(p1.Spec.Containers, check.DeepEquals, []corev1.Container{ { Name: "c1", Image: "img1", @@ -1056,7 +1056,7 @@ func (s *PodSuite) TestGetRedactedPod(c *C) { }) } -func (s *PodControllerTestSuite) TestContainerNameFromPodOptsOrDefault(c *C) { +func (s *PodControllerTestSuite) TestContainerNameFromPodOptsOrDefault(c *check.C) { for _, tc := range []struct { podOptsContainerName string expectedContainerName string @@ -1073,17 +1073,17 @@ func (s *PodControllerTestSuite) TestContainerNameFromPodOptsOrDefault(c *C) { name := ContainerNameFromPodOptsOrDefault(&PodOptions{ ContainerName: tc.podOptsContainerName, }) - c.Assert(name, Equals, tc.expectedContainerName) + c.Assert(name, check.Equals, tc.expectedContainerName) } name := ContainerNameFromPodOptsOrDefault(&PodOptions{}) - c.Assert(name, Equals, DefaultContainerName) + c.Assert(name, check.Equals, DefaultContainerName) name = ContainerNameFromPodOptsOrDefault(nil) - c.Assert(name, Equals, DefaultContainerName) + c.Assert(name, check.Equals, DefaultContainerName) } -func (s *PodSuite) TestAddLabels(c *C) { +func (s *PodSuite) TestAddLabels(c *check.C) { for _, tc := range []struct { podOptions *PodOptions labels map[string]string @@ -1160,11 +1160,11 @@ func (s *PodSuite) TestAddLabels(c *C) { }, } { tc.podOptions.AddLabels(tc.labels) - c.Assert(tc.podOptions, DeepEquals, tc.expectedPodOptions) + c.Assert(tc.podOptions, check.DeepEquals, tc.expectedPodOptions) } } -func (s *PodSuite) TestAddAnnotations(c *C) { +func (s *PodSuite) TestAddAnnotations(c *check.C) { for _, tc := range []struct { podOptions *PodOptions annotations map[string]string @@ -1241,6 +1241,6 @@ func (s *PodSuite) TestAddAnnotations(c *C) { }, } { tc.podOptions.AddAnnotations(tc.annotations) - c.Assert(tc.podOptions, DeepEquals, tc.expectedPodOptions) + c.Assert(tc.podOptions, check.DeepEquals, tc.expectedPodOptions) } } diff --git a/pkg/kube/pod_writer_test.go b/pkg/kube/pod_writer_test.go index 0a09d88dda..9cd483ea67 100644 --- a/pkg/kube/pod_writer_test.go +++ b/pkg/kube/pod_writer_test.go @@ -23,7 +23,7 @@ import ( "path/filepath" "time" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -35,20 +35,20 @@ type PodWriteSuite struct { pod *corev1.Pod } -var _ = Suite(&PodWriteSuite{}) +var _ = check.Suite(&PodWriteSuite{}) -func (p *PodWriteSuite) SetUpSuite(c *C) { +func (p *PodWriteSuite) SetUpSuite(c *check.C) { var err error ctx := context.Background() p.cli, err = NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "podwritertest-", }, } ns, err = p.cli.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p.namespace = ns.Name pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "testpod"}, @@ -63,35 +63,35 @@ func (p *PodWriteSuite) SetUpSuite(c *C) { }, } p.pod, err = p.cli.CoreV1().Pods(p.namespace).Create(ctx, pod, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) var cancel context.CancelFunc ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() - c.Assert(WaitForPodReady(ctx, p.cli, p.namespace, p.pod.Name), IsNil) + c.Assert(WaitForPodReady(ctx, p.cli, p.namespace, p.pod.Name), check.IsNil) p.pod, err = p.cli.CoreV1().Pods(p.namespace).Get(ctx, p.pod.Name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (p *PodWriteSuite) TearDownSuite(c *C) { +func (p *PodWriteSuite) TearDownSuite(c *check.C) { if p.namespace != "" { err := p.cli.CoreV1().Namespaces().Delete(context.TODO(), p.namespace, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } -func (p *PodWriteSuite) TestPodWriter(c *C) { +func (p *PodWriteSuite) TestPodWriter(c *check.C) { path := "/tmp/test.txt" - c.Assert(p.pod.Status.Phase, Equals, corev1.PodRunning) - c.Assert(len(p.pod.Status.ContainerStatuses) > 0, Equals, true) + c.Assert(p.pod.Status.Phase, check.Equals, corev1.PodRunning) + c.Assert(len(p.pod.Status.ContainerStatuses) > 0, check.Equals, true) for _, cs := range p.pod.Status.ContainerStatuses { pw := NewPodWriter(p.cli, path, bytes.NewBufferString("badabing")) err := pw.Write(context.Background(), p.pod.Namespace, p.pod.Name, cs.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cmd := []string{"sh", "-c", "cat " + filepath.Clean(path)} stdout, stderr, err := Exec(context.Background(), p.cli, p.pod.Namespace, p.pod.Name, cs.Name, cmd, nil) - c.Assert(err, IsNil) - c.Assert(stdout, Equals, "badabing") - c.Assert(stderr, Equals, "") + c.Assert(err, check.IsNil) + c.Assert(stdout, check.Equals, "badabing") + c.Assert(stderr, check.Equals, "") err = pw.Remove(context.Background(), p.pod.Namespace, p.pod.Name, cs.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } diff --git a/pkg/kube/podinfo_test.go b/pkg/kube/podinfo_test.go index c840f1d725..336a2a303c 100644 --- a/pkg/kube/podinfo_test.go +++ b/pkg/kube/podinfo_test.go @@ -17,66 +17,66 @@ package kube import ( "os" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/client-go/kubernetes/fake" ) type PodInfoSuite struct{} -var _ = Suite(&PodInfoSuite{}) +var _ = check.Suite(&PodInfoSuite{}) const testPodNamespace = "test-pod-namespace" const testPodName = "test-pod-name" const testPodSA = "test-pod-sa" -func (s *PodInfoSuite) TestGetControllerNamespaceFromEnv(c *C) { +func (s *PodInfoSuite) TestGetControllerNamespaceFromEnv(c *check.C) { err := os.Setenv(PodNSEnvVar, testPodNamespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ns, err := GetControllerNamespace() - c.Assert(err, IsNil) - c.Assert(ns, Equals, testPodNamespace) + c.Assert(err, check.IsNil) + c.Assert(ns, check.Equals, testPodNamespace) err = os.Unsetenv(PodNSEnvVar) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *PodInfoSuite) TestGetControllerNamespaceFromFile(c *C) { +func (s *PodInfoSuite) TestGetControllerNamespaceFromFile(c *check.C) { if _, err := os.Stat(nsFile); os.IsNotExist(err) { c.Skip("Namespace file is not presented. Skipping") } nsFromFile, err := os.ReadFile(nsFile) - c.Assert(err, IsNil) - c.Assert(nsFromFile, NotNil) + c.Assert(err, check.IsNil) + c.Assert(nsFromFile, check.NotNil) ns, err := GetControllerNamespace() - c.Assert(err, IsNil) - c.Assert(ns, Equals, nsFromFile) + c.Assert(err, check.IsNil) + c.Assert(ns, check.Equals, nsFromFile) } -func (s *PodInfoSuite) TestGetControllerPodNameFromEnv(c *C) { +func (s *PodInfoSuite) TestGetControllerPodNameFromEnv(c *check.C) { err := os.Setenv(podNameEnvVar, testPodName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) podName, err := GetControllerPodName() - c.Assert(err, IsNil) - c.Assert(podName, Equals, testPodName) + c.Assert(err, check.IsNil) + c.Assert(podName, check.Equals, testPodName) err = os.Unsetenv(podNameEnvVar) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *PodInfoSuite) TestGetControllerPodNameFromSystem(c *C) { +func (s *PodInfoSuite) TestGetControllerPodNameFromSystem(c *check.C) { hostname, err := os.Hostname() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) podName, err := GetControllerPodName() - c.Assert(err, IsNil) - c.Assert(podName, Equals, hostname) + c.Assert(err, check.IsNil) + c.Assert(podName, check.Equals, hostname) } -func (s *PodInfoSuite) TestGetControllerServiceAccountFromEnv(c *C) { +func (s *PodInfoSuite) TestGetControllerServiceAccountFromEnv(c *check.C) { err := os.Setenv(PodSAEnvVar, testPodSA) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) saName, err := GetControllerServiceAccount(fake.NewSimpleClientset()) - c.Assert(err, IsNil) - c.Assert(saName, Equals, testPodSA) + c.Assert(err, check.IsNil) + c.Assert(saName, check.Equals, testPodSA) err = os.Unsetenv(testPodSA) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } diff --git a/pkg/kube/snapshot/snapshot_test.go b/pkg/kube/snapshot/snapshot_test.go index e7e5b9f514..4cfc17145c 100644 --- a/pkg/kube/snapshot/snapshot_test.go +++ b/pkg/kube/snapshot/snapshot_test.go @@ -24,7 +24,7 @@ import ( "time" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" scv1 "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -46,7 +46,7 @@ import ( "github.com/kanisterio/kanister/pkg/poll" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type SnapshotTestSuite struct { sourceNamespace string @@ -64,7 +64,7 @@ type SnapshotTestSuite struct { storageClassCSIStable string } -var _ = Suite(&SnapshotTestSuite{}) +var _ = check.Suite(&SnapshotTestSuite{}) var ( defaultNamespace = "default" @@ -79,17 +79,17 @@ var ( snapshotNamePrefix = "snap-snapshot-test-" ) -func (s *SnapshotTestSuite) SetUpSuite(c *C) { +func (s *SnapshotTestSuite) SetUpSuite(c *check.C) { suffix := strconv.Itoa(int(time.Now().UnixNano() % 100000)) s.sourceNamespace = "snapshot-test-source-" + suffix s.targetNamespace = "snapshot-test-target-" + suffix ctx := context.Background() cli, err := kube.NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli dynCli, err := kube.NewDynamicClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.dynCli = dynCli s.snapshotterAlpha = snapshot.NewSnapshotAlpha(cli, dynCli) @@ -121,7 +121,7 @@ func (s *SnapshotTestSuite) SetUpSuite(c *C) { } storageClasses, err := cli.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, class := range storageClasses.Items { if class.Provisioner == driverAlpha && *class.VolumeBindingMode == scv1.VolumeBindingImmediate { s.storageClassCSIAlpha = class.Name @@ -135,18 +135,18 @@ func (s *SnapshotTestSuite) SetUpSuite(c *C) { } _, err = cli.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: s.sourceNamespace}}, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = cli.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: s.targetNamespace}}, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *SnapshotTestSuite) TearDownSuite(c *C) { +func (s *SnapshotTestSuite) TearDownSuite(c *check.C) { s.cleanupNamespace(c, s.sourceNamespace) s.cleanupNamespace(c, s.targetNamespace) } -func (s *SnapshotTestSuite) TestVolumeSnapshotFake(c *C) { +func (s *SnapshotTestSuite) TestVolumeSnapshotFake(c *check.C) { snapshotName := "snap-1-fake" volName := "pvc-1-fake" scheme := runtime.NewScheme() @@ -157,7 +157,7 @@ func (s *SnapshotTestSuite) TestVolumeSnapshotFake(c *C) { fakeCli := fake.NewSimpleClientset() size, err := resource.ParseQuantity("1Gi") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: volName, @@ -171,7 +171,7 @@ func (s *SnapshotTestSuite) TestVolumeSnapshotFake(c *C) { }, } _, err = fakeCli.CoreV1().PersistentVolumeClaims(defaultNamespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) for _, fakeSs := range []snapshot.Snapshotter{ snapshot.NewSnapshotAlpha(fakeCli, dynfake.NewSimpleDynamicClient(scheme)), @@ -183,22 +183,22 @@ func (s *SnapshotTestSuite) TestVolumeSnapshotFake(c *C) { Namespace: defaultNamespace, } err = fakeSs.Create(context.Background(), volName, &fakeClass, false, snapshotMeta) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) snap, err := fakeSs.Get(context.Background(), snapshotName, defaultNamespace) - c.Assert(err, IsNil) - c.Assert(snap.Name, Equals, snapshotName) + c.Assert(err, check.IsNil) + 
c.Assert(snap.Name, check.Equals, snapshotName) err = fakeSs.Create(context.Background(), volName, &fakeClass, false, snapshotMeta) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) deletedSnap, err := fakeSs.Delete(context.Background(), snap.Name, snap.Namespace) - c.Assert(err, IsNil) - c.Assert(deletedSnap.Name, Equals, snap.Name) + c.Assert(err, check.IsNil) + c.Assert(deletedSnap.Name, check.Equals, snap.Name) _, err = fakeSs.Delete(context.Background(), snap.Name, snap.Namespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } -func (s *SnapshotTestSuite) TestVolumeSnapshotClassCloneFake(c *C) { +func (s *SnapshotTestSuite) TestVolumeSnapshotClassCloneFake(c *check.C) { ctx := context.Background() scheme := runtime.NewScheme() scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Kind: "VolumeSnapshotClassList"}, &unstructured.UnstructuredList{}) @@ -248,33 +248,33 @@ func (s *SnapshotTestSuite) TestVolumeSnapshotClassCloneFake(c *C) { annotationKeyToRemove: "true", }) _, err := dynCli.Resource(tc.snapClassGVR).Create(ctx, tc.sourceSnapClassSpec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = tc.snapshotter.CloneVolumeSnapshotClass(ctx, tc.sourceSnapClassSpec.GetName(), "targetClass", snapshot.DeletionPolicyRetain, []string{annotationKeyToRemove}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // New VSC exists createdVSC, err := dynCli.Resource(tc.snapClassGVR).Get(ctx, "targetClass", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Annotations are set correctly - c.Assert(createdVSC.GetAnnotations(), DeepEquals, map[string]string{annotationKeyToKeep: "true"}) - c.Assert(createdVSC.GetLabels(), DeepEquals, map[string]string{snapshot.CloneVolumeSnapshotClassLabelName: tc.sourceSnapClassSpec.GetName()}) + c.Assert(createdVSC.GetAnnotations(), check.DeepEquals, map[string]string{annotationKeyToKeep: "true"}) + c.Assert(createdVSC.GetLabels(), check.DeepEquals, map[string]string{snapshot.CloneVolumeSnapshotClassLabelName: tc.sourceSnapClassSpec.GetName()}) // Parameters are set correctly - c.Assert(createdVSC.Object["parameters"], DeepEquals, snapshot.Mss2msi(fakeParams)) + c.Assert(createdVSC.Object["parameters"], check.DeepEquals, snapshot.Mss2msi(fakeParams)) // Lookup by old annotation correctly returns the source VSC scWithOldAnnotation, err := tc.snapshotter.GetVolumeSnapshotClass(ctx, annotationKeyToRemove, "true", fakeSC) - c.Assert(err, IsNil) - c.Assert(scWithOldAnnotation, Equals, tc.sourceSnapClassSpec.GetName()) + c.Assert(err, check.IsNil) + c.Assert(scWithOldAnnotation, check.Equals, tc.sourceSnapClassSpec.GetName()) // Clone again succeeds err = tc.snapshotter.CloneVolumeSnapshotClass(ctx, tc.sourceSnapClassSpec.GetName(), "targetClass", snapshot.DeletionPolicyRetain, []string{annotationKeyToRemove}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } -func (s *SnapshotTestSuite) TestVolumeSnapshotCloneFake(c *C) { +func (s *SnapshotTestSuite) TestVolumeSnapshotCloneFake(c *check.C) { ctx := context.Background() fakeSnapshotName := "snap-1-fake" fakeContentName := "snapcontent-1-fake" @@ -371,14 +371,14 @@ func (s *SnapshotTestSuite) TestVolumeSnapshotCloneFake(c *C) { "creationTime": metav1.Now().Format("2006-01-02T15:04:05Z"), } _, err := dynCli.Resource(tc.snapClassGVR).Create(ctx, tc.snapClassSpec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = 
dynCli.Resource(tc.snapGVR).Namespace(defaultNamespace).Create(ctx, tc.snapSpec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = dynCli.Resource(tc.contentGVR).Create(ctx, tc.contentSpec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = tc.fakeSs.Get(context.Background(), fakeSnapshotName, defaultNamespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = tc.fakeSs.Clone(context.Background(), fakeSnapshotName, defaultNamespace, false, snapshot.ObjectMeta{ Name: fakeClone, @@ -389,27 +389,27 @@ func (s *SnapshotTestSuite) TestVolumeSnapshotCloneFake(c *C) { Annotations: fakeContentAnnotation, }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) clone, err := tc.fakeSs.Get(context.Background(), fakeClone, fakeTargetNamespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) us, err := dynCli.Resource(tc.contentGVR).Get(ctx, *clone.Spec.Source.VolumeSnapshotContentName, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = snapshot.TransformUnstructured(us, tc.snapContentObject) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) if cloneContent, ok := tc.snapContentObject.(*v1alpha1.VolumeSnapshotContent); ok { - c.Assert(strings.HasPrefix(cloneContent.Name, fakeClone), Equals, true) - c.Assert(cloneContent.Spec.DeletionPolicy, Equals, tc.snapClassSpec.Object["deletionPolicy"]) + c.Assert(strings.HasPrefix(cloneContent.Name, fakeClone), check.Equals, true) + c.Assert(cloneContent.Spec.DeletionPolicy, check.Equals, tc.snapClassSpec.Object["deletionPolicy"]) } if cloneContent, ok := tc.snapContentObject.(*v1beta1.VolumeSnapshotContent); ok { - c.Assert(strings.HasPrefix(cloneContent.Name, fakeClone), Equals, true) - c.Assert(cloneContent.Spec.DeletionPolicy, Equals, tc.snapClassSpec.Object["deletionPolicy"]) + c.Assert(strings.HasPrefix(cloneContent.Name, fakeClone), check.Equals, true) + c.Assert(cloneContent.Spec.DeletionPolicy, check.Equals, tc.snapClassSpec.Object["deletionPolicy"]) } } } -func (s *SnapshotTestSuite) TestWaitOnReadyToUse(c *C) { +func (s *SnapshotTestSuite) TestWaitOnReadyToUse(c *check.C) { snapshotNameBase := "snap-1-fake" volName := "pvc-1-fake" scheme := runtime.NewScheme() @@ -420,7 +420,7 @@ func (s *SnapshotTestSuite) TestWaitOnReadyToUse(c *C) { fakeCli := fake.NewSimpleClientset() size, err := resource.ParseQuantity("1Gi") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -435,7 +435,7 @@ func (s *SnapshotTestSuite) TestWaitOnReadyToUse(c *C) { }, } _, err = fakeCli.CoreV1().PersistentVolumeClaims(defaultNamespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) dynCli := dynfake.NewSimpleDynamicClient(scheme) @@ -464,21 +464,21 @@ func (s *SnapshotTestSuite) TestWaitOnReadyToUse(c *C) { Namespace: defaultNamespace, } err = fakeSs.Create(ctx, volName, &fakeClass, false, snapshotMeta) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // This function should timeout timeout := 500 * time.Millisecond bgTimeout := 5 * time.Second // We don't have readyToUse and no error, waiting indefinitely err = waitOnReadyToUseWithTimeout(c, ctx, fakeSs, snapshotName, defaultNamespace, timeout) - c.Assert(err, NotNil) - c.Assert(err.Error(), Matches, ".*context deadline exceeded*") + c.Assert(err, check.NotNil) + c.Assert(err.Error(), check.Matches, ".*context deadline exceeded*") reply := waitOnReadyToUseInBackground(c, ctx, fakeSs, 
snapshotName, defaultNamespace, bgTimeout) setReadyStatus(c, dynCli, volumeSnapshotGVR, snapshotName, defaultNamespace) select { case err = <-reply: - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) case <-time.After(2 * time.Second): c.Error("timeout waiting on ready to use") } @@ -491,8 +491,8 @@ func (s *SnapshotTestSuite) TestWaitOnReadyToUse(c *C) { // If there is non-transient error, exit right away err = waitOnReadyToUseWithTimeout(c, ctx, fakeSs, snapshotName, defaultNamespace, timeout) - c.Assert(err, NotNil) - c.Assert(err.Error(), Matches, ".*some error.*") + c.Assert(err, check.NotNil) + c.Assert(err.Error(), check.Matches, ".*some error.*") // Set transient error message = "the object has been modified; please apply your changes to the latest version and try again" @@ -500,14 +500,14 @@ func (s *SnapshotTestSuite) TestWaitOnReadyToUse(c *C) { // If there is a transient error, wait with exp backoff which is long err = waitOnReadyToUseWithTimeout(c, ctx, fakeSs, snapshotName, defaultNamespace, timeout) - c.Assert(err, NotNil) - c.Assert(err.Error(), Matches, ".*context deadline exceeded*") + c.Assert(err, check.NotNil) + c.Assert(err.Error(), check.Matches, ".*context deadline exceeded*") reply = waitOnReadyToUseInBackground(c, ctx, fakeSs, snapshotName, defaultNamespace, bgTimeout) setReadyStatus(c, dynCli, volumeSnapshotGVR, snapshotName, defaultNamespace) select { case err = <-reply: - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) case <-time.After(2 * time.Second): c.Error("timeout waiting on ready to use") } @@ -518,7 +518,7 @@ func (s *SnapshotTestSuite) TestWaitOnReadyToUse(c *C) { // ---------------------------------------------------------------------------- func waitOnReadyToUseInBackground( - c *C, + c *check.C, ctx context.Context, fakeSs snapshot.Snapshotter, snapshotName string, @@ -534,7 +534,7 @@ func waitOnReadyToUseInBackground( } func waitOnReadyToUseWithTimeout( - c *C, + c *check.C, ctx context.Context, fakeSs snapshot.Snapshotter, snapshotName string, @@ -550,7 +550,7 @@ func waitOnReadyToUseWithTimeout( } func setReadyStatus( - c *C, + c *check.C, dynCli *dynfake.FakeDynamicClient, volumeSnapshotGVR schema.GroupVersionResource, snapshotName string, @@ -564,7 +564,7 @@ func setReadyStatus( } func setErrorStatus( - c *C, + c *check.C, dynCli *dynfake.FakeDynamicClient, volumeSnapshotGVR schema.GroupVersionResource, snapshotName string, @@ -579,7 +579,7 @@ func setErrorStatus( } func setVolumeSnapshotStatus( - c *C, + c *check.C, dynCli *dynfake.FakeDynamicClient, volumeSnapshotGVR schema.GroupVersionResource, snapshotName string, @@ -590,15 +590,15 @@ func setVolumeSnapshotStatus( defer cancel() us, err := dynCli.Resource(volumeSnapshotGVR).Namespace(namespace).Get(ctx, snapshotName, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) us.Object["status"] = status _, err = dynCli.Resource(volumeSnapshotGVR).Namespace(namespace).UpdateStatus(ctx, us, metav1.UpdateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } // ---------------------------------------------------------------------------- -func (s *SnapshotTestSuite) TestVolumeSnapshotAlpha(c *C) { +func (s *SnapshotTestSuite) TestVolumeSnapshotAlpha(c *check.C) { if s.snapshotClassAlpha == nil { c.Skip("No v1alpha1 Volumesnapshotclass in the cluster") } @@ -610,7 +610,7 @@ func (s *SnapshotTestSuite) TestVolumeSnapshotAlpha(c *C) { s.testVolumeSnapshot(c, s.snapshotterAlpha, s.storageClassCSIAlpha, s.snapshotClassAlpha) } -func (s *SnapshotTestSuite) 
TestVolumeSnapshotBeta(c *C) { +func (s *SnapshotTestSuite) TestVolumeSnapshotBeta(c *check.C) { if s.snapshotClassBeta == nil { c.Skip("No v1beta1 Volumesnapshotclass in the cluster") } @@ -622,7 +622,7 @@ func (s *SnapshotTestSuite) TestVolumeSnapshotBeta(c *C) { s.testVolumeSnapshot(c, s.snapshotterBeta, s.storageClassCSIBeta, s.snapshotClassBeta) } -func (s *SnapshotTestSuite) TestVolumeSnapshotStable(c *C) { +func (s *SnapshotTestSuite) TestVolumeSnapshotStable(c *check.C) { if s.snapshotClassStable == nil { c.Skip("No v1 Volumesnapshotclass in the cluster") } @@ -634,12 +634,12 @@ func (s *SnapshotTestSuite) TestVolumeSnapshotStable(c *C) { s.testVolumeSnapshot(c, s.snapshotterStable, s.storageClassCSIStable, s.snapshotClassStable) } -func (s *SnapshotTestSuite) testVolumeSnapshot(c *C, snapshotter snapshot.Snapshotter, storageClass string, snapshotClass *string) { +func (s *SnapshotTestSuite) testVolumeSnapshot(c *check.C, snapshotter snapshot.Snapshotter, storageClass string, snapshotClass *string) { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() size, err := resource.ParseQuantity("1Gi") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -657,7 +657,7 @@ func (s *SnapshotTestSuite) testVolumeSnapshot(c *C, snapshotter snapshot.Snapsh }, } pvc, err = s.cli.CoreV1().PersistentVolumeClaims(s.sourceNamespace).Create(ctx, pvc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = poll.Wait(ctx, func(ctx context.Context) (bool, error) { pvc, err = s.cli.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) if err != nil { @@ -665,7 +665,7 @@ func (s *SnapshotTestSuite) testVolumeSnapshot(c *C, snapshotter snapshot.Snapsh } return pvc.Status.Phase == corev1.ClaimBound, nil }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) snapshotName := snapshotNamePrefix + strconv.Itoa(int(time.Now().UnixNano())) wait := true @@ -682,25 +682,25 @@ func (s *SnapshotTestSuite) testVolumeSnapshot(c *C, snapshotter snapshot.Snapsh Annotations: annotations, } err = snapshotter.Create(ctx, pvc.Name, snapshotClass, wait, snapshotMeta) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) snap, err := snapshotter.Get(ctx, snapshotName, s.sourceNamespace) - c.Assert(err, IsNil) - c.Assert(snap.Name, Equals, snapshotName) - c.Assert(snap.Status.ReadyToUse, NotNil) - c.Assert(*snap.Status.ReadyToUse, Equals, true) + c.Assert(err, check.IsNil) + c.Assert(snap.Name, check.Equals, snapshotName) + c.Assert(snap.Status.ReadyToUse, check.NotNil) + c.Assert(*snap.Status.ReadyToUse, check.Equals, true) snapList, err := snapshotter.List(ctx, s.sourceNamespace, label) - c.Assert(err, IsNil) - c.Assert(len(snapList.Items), Equals, 1) - c.Assert(snapList.Items[0].Labels, DeepEquals, label) + c.Assert(err, check.IsNil) + c.Assert(len(snapList.Items), check.Equals, 1) + c.Assert(snapList.Items[0].Labels, check.DeepEquals, label) snapshotMeta = snapshot.ObjectMeta{ Name: snapshotName, Namespace: s.sourceNamespace, Annotations: annotations, } err = snapshotter.Create(ctx, pvc.Name, snapshotClass, wait, snapshotMeta) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) snapshotCloneName := snapshotName + "-clone" volumeCloneName := pvc.Name + "-clone" @@ -721,12 +721,12 @@ func (s *SnapshotTestSuite) testVolumeSnapshot(c *C, snapshotter snapshot.Snapsh Annotations: contentAnnotation, } err = snapshotter.Clone(ctx, snapshotName, s.sourceNamespace, wait, 
snapshotMeta, snapshotContentMeta) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) snapList, err = snapshotter.List(ctx, s.targetNamespace, label) - c.Assert(err, IsNil) - c.Assert(len(snapList.Items), Equals, 1) - c.Assert(snapList.Items[0].Labels, DeepEquals, label) + c.Assert(err, check.IsNil) + c.Assert(len(snapList.Items), check.Equals, 1) + c.Assert(snapList.Items[0].Labels, check.DeepEquals, label) args := &volume.CreatePVCFromSnapshotArgs{ KubeCli: s.cli, @@ -739,7 +739,7 @@ func (s *SnapshotTestSuite) testVolumeSnapshot(c *C, snapshotter snapshot.Snapsh Labels: nil, } _, err = volume.CreatePVCFromSnapshot(ctx, args) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _ = poll.Wait(ctx, func(ctx context.Context) (bool, error) { pvc, err = s.cli.CoreV1().PersistentVolumeClaims(s.targetNamespace).Get(ctx, volumeCloneName, metav1.GetOptions{}) if err != nil { @@ -764,27 +764,27 @@ func (s *SnapshotTestSuite) testVolumeSnapshot(c *C, snapshotter snapshot.Snapsh }, } _, err = volume.CreatePVCFromSnapshot(ctx, args) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _ = poll.Wait(ctx, func(ctx context.Context) (bool, error) { pvc, err = s.cli.CoreV1().PersistentVolumeClaims(s.targetNamespace).Get(ctx, volumeCloneName, metav1.GetOptions{}) if err != nil { return false, err } - c.Assert(pvc.Labels, DeepEquals, args.Labels) + c.Assert(pvc.Labels, check.DeepEquals, args.Labels) return pvc.Status.Phase == corev1.ClaimBound, nil }) _, err = snapshotter.Delete(ctx, snap.Name, snap.Namespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = snapshotter.Delete(ctx, snap.Name, snap.Namespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = snapshotter.Delete(ctx, snapshotCloneName, s.targetNamespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *SnapshotTestSuite) cleanupNamespace(c *C, ns string) { +func (s *SnapshotTestSuite) cleanupNamespace(c *check.C, ns string) { ctx := context.Background() pvcs, erra := s.cli.CoreV1().PersistentVolumeClaims(ns).List(ctx, metav1.ListOptions{}) if erra != nil { @@ -820,14 +820,14 @@ func (s *SnapshotTestSuite) cleanupNamespace(c *C, ns string) { } } -func (s *SnapshotTestSuite) TestNewSnapshotter(c *C) { +func (s *SnapshotTestSuite) TestNewSnapshotter(c *check.C) { fakeCli := fake.NewSimpleClientset() _, err := snapshot.NewSnapshotter(fakeCli, nil) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) for _, tc := range []struct { apiResources metav1.APIResourceList expected string - check Checker + check check.Checker }{ { apiResources: metav1.APIResourceList{ @@ -838,7 +838,7 @@ func (s *SnapshotTestSuite) TestNewSnapshotter(c *C) { GroupVersion: "snapshot.storage.k8s.io/v1alpha1", }, expected: "*snapshot.SnapshotAlpha", - check: IsNil, + check: check.IsNil, }, { apiResources: metav1.APIResourceList{ @@ -849,7 +849,7 @@ func (s *SnapshotTestSuite) TestNewSnapshotter(c *C) { GroupVersion: "snapshot.storage.k8s.io/v1beta1", }, expected: "*snapshot.SnapshotBeta", - check: IsNil, + check: check.IsNil, }, { apiResources: metav1.APIResourceList{ @@ -860,14 +860,14 @@ func (s *SnapshotTestSuite) TestNewSnapshotter(c *C) { GroupVersion: "snapshot.storage.k8s.io/v1", }, expected: "*snapshot.SnapshotStable", - check: IsNil, + check: check.IsNil, }, } { apiRes := tc.apiResources fakeCli.Resources = []*metav1.APIResourceList{&apiRes} ss, err := snapshot.NewSnapshotter(fakeCli, nil) c.Assert(err, tc.check) - c.Assert(reflect.TypeOf(ss).String(), Equals, tc.expected) + c.Assert(reflect.TypeOf(ss).String(), check.Equals, 
tc.expected) } } @@ -881,10 +881,10 @@ type snapshotClassTC struct { snapClassStable *unstructured.Unstructured testKey string testValue string - check Checker + check check.Checker } -func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { +func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *check.C) { ctx := context.Background() scheme := runtime.NewScheme() scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Kind: "VolumeSnapshotClassList"}, &unstructured.UnstructuredList{}) @@ -910,11 +910,11 @@ func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { fakeSsBeta := snapshot.NewSnapshotBeta(kubeCli, dynCli) fakeSsStable := snapshot.NewSnapshotStable(kubeCli, dynCli) _, err := fakeSsAlpha.GetVolumeSnapshotClass(ctx, "test-annotation", "value", fakeSC) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) _, err = fakeSsBeta.GetVolumeSnapshotClass(ctx, "test-annotation", "value", fakeSC) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) _, err = fakeSsStable.GetVolumeSnapshotClass(ctx, "test-annotation", "value", fakeSC) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) for _, tc := range []snapshotClassTC{ { @@ -927,7 +927,7 @@ func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { snapClassStable: snapshot.UnstructuredVolumeSnapshotClass(snapshot.VolSnapClassGVR, "test-1", fakeDriver, "Delete", nil), testKey: "test-1", testValue: "true", - check: IsNil, + check: check.IsNil, }, { name: "test-2", @@ -939,7 +939,7 @@ func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { snapClassStable: snapshot.UnstructuredVolumeSnapshotClass(snapshot.VolSnapClassGVR, "test-2", fakeDriver, "Delete", nil), testKey: "", testValue: "", - check: IsNil, + check: check.IsNil, }, { name: "test-3", @@ -951,7 +951,7 @@ func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { snapClassStable: snapshot.UnstructuredVolumeSnapshotClass(snapshot.VolSnapClassGVR, "test-2", fakeDriver, "Delete", nil), testKey: "invalid", testValue: "false", - check: NotNil, + check: check.NotNil, }, { name: "test-4", @@ -963,7 +963,7 @@ func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { snapClassStable: snapshot.UnstructuredVolumeSnapshotClass(snapshot.VolSnapClassGVR, "test-4", fakeDriver, "Delete", nil), testKey: "test-4", testValue: "true", - check: NotNil, + check: check.NotNil, }, { name: "test-5", @@ -975,7 +975,7 @@ func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { snapClassStable: snapshot.UnstructuredVolumeSnapshotClass(snapshot.VolSnapClassGVR, "test-5", fakeDriver, "Delete", nil), testKey: "test-5", testValue: "true", - check: NotNil, + check: check.NotNil, }, { name: "test-6", @@ -987,7 +987,7 @@ func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { snapClassStable: snapshot.UnstructuredVolumeSnapshotClass(snapshot.VolSnapClassGVR, "test-6", "driverMismatch", "Delete", nil), testKey: "test-6", testValue: "true", - check: NotNil, + check: check.NotNil, }, { // missing driver/snapshotter in unsturctured object name: "test-7", @@ -1026,7 +1026,7 @@ func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { }, testKey: "test-7", testValue: "true", - check: NotNil, + check: check.NotNil, }, { // bad kind name: "test-8", @@ -1065,7 +1065,7 @@ func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { }, testKey: "test-8", testValue: "true", - check: NotNil, + check: check.NotNil, }, { // not driver string name: "test-9", @@ -1113,7 +1113,7 @@ 
func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { }, testKey: "test-9", testValue: "true", - check: NotNil, + check: check.NotNil, }, } { tc.testGetSnapshotClass(c, dynCli, fakeSsAlpha, tc.snapClassAlpha, v1alpha1.VolSnapClassGVR) @@ -1122,7 +1122,7 @@ func (s *SnapshotTestSuite) TestGetVolumeSnapshotClassFake(c *C) { } } -func (tc snapshotClassTC) testGetSnapshotClass(c *C, dynCli dynamic.Interface, fakeSs snapshot.Snapshotter, snapClass *unstructured.Unstructured, gvr schema.GroupVersionResource) { +func (tc snapshotClassTC) testGetSnapshotClass(c *check.C, dynCli dynamic.Interface, fakeSs snapshot.Snapshotter, snapClass *unstructured.Unstructured, gvr schema.GroupVersionResource) { // Add annotations ctx := context.Background() snapClass.Object["metadata"] = map[string]interface{}{ @@ -1132,19 +1132,19 @@ func (tc snapshotClassTC) testGetSnapshotClass(c *C, dynCli dynamic.Interface, f }, } _, err := dynCli.Resource(gvr).Create(ctx, snapClass, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err := dynCli.Resource(gvr).Delete(context.TODO(), tc.name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() name, err := fakeSs.GetVolumeSnapshotClass(ctx, tc.testKey, tc.testValue, tc.storageClassName) - c.Assert(err, tc.check, Commentf("%s", tc.testKey)) + c.Assert(err, tc.check, check.Commentf("%s", tc.testKey)) if err == nil { - c.Assert(name, Equals, tc.name) + c.Assert(name, check.Equals, tc.name) } } -func findSnapshotClassName(c *C, ctx context.Context, dynCli dynamic.Interface, gvr schema.GroupVersionResource, object interface{}) (string, string) { +func findSnapshotClassName(c *check.C, ctx context.Context, dynCli dynamic.Interface, gvr schema.GroupVersionResource, object interface{}) (string, string) { // Find alpha VolumeSnapshotClass name us, err := dynCli.Resource(gvr).List(ctx, metav1.ListOptions{}) if err != nil && !apierrors.IsNotFound(err) { @@ -1187,7 +1187,7 @@ func findSnapshotClassName(c *C, ctx context.Context, dynCli dynamic.Interface, return snapshotClass, snapshotterName } -func (s *SnapshotTestSuite) TestCreateFromSourceAlpha(c *C) { +func (s *SnapshotTestSuite) TestCreateFromSourceAlpha(c *check.C) { ctx := context.Background() namespace := "namespace" snapshotName := "snapname" @@ -1213,25 +1213,25 @@ func (s *SnapshotTestSuite) TestCreateFromSourceAlpha(c *C) { kubeCli := fake.NewSimpleClientset() snapshotterAlpha, ok := snapshot.NewSnapshotAlpha(kubeCli, dynCli).(*snapshot.SnapshotAlpha) - c.Assert(ok, Equals, true) + c.Assert(ok, check.Equals, true) // set true err := snapshotterAlpha.UpdateVolumeSnapshotStatusAlpha(ctx, namespace, snapshotName, true) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) us, err := dynCli.Resource(v1alpha1.VolSnapGVR).Namespace(namespace).Get(ctx, snapshotName, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) status, ok := us.Object["status"].(map[string]interface{}) - c.Assert(ok, Equals, true) - c.Assert(status["readyToUse"], Equals, true) + c.Assert(ok, check.Equals, true) + c.Assert(status["readyToUse"], check.Equals, true) // set false err = snapshotterAlpha.UpdateVolumeSnapshotStatusAlpha(ctx, namespace, snapshotName, false) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) us, err = dynCli.Resource(v1alpha1.VolSnapGVR).Namespace(namespace).Get(ctx, snapshotName, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) status, ok = us.Object["status"].(map[string]interface{}) - c.Assert(ok, Equals, 
true) - c.Assert(status["readyToUse"], Equals, false) + c.Assert(ok, check.Equals, true) + c.Assert(status["readyToUse"], check.Equals, false) snapshotMeta = snapshot.ObjectMeta{ Name: snapshotName, @@ -1245,12 +1245,12 @@ func (s *SnapshotTestSuite) TestCreateFromSourceAlpha(c *C) { snapshotMeta, snapshotContentMeta) dynCli = dynfake.NewSimpleDynamicClient(scheme, volSnap) snapshotterAlpha, ok = snapshot.NewSnapshotAlpha(kubeCli, dynCli).(*snapshot.SnapshotAlpha) - c.Assert(ok, Equals, true) + c.Assert(ok, check.Equals, true) err = snapshotterAlpha.UpdateVolumeSnapshotStatusAlpha(ctx, namespace, snapshotName, false) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (s *SnapshotTestSuite) TestCreateFromSourceBeta(c *C) { +func (s *SnapshotTestSuite) TestCreateFromSourceBeta(c *check.C) { ctx := context.Background() namespace := "namespace" snapshotName := "snapname" @@ -1272,25 +1272,25 @@ func (s *SnapshotTestSuite) TestCreateFromSourceBeta(c *C) { kubeCli := fake.NewSimpleClientset() snapshotterBeta, ok := snapshot.NewSnapshotBeta(kubeCli, dynCli).(*snapshot.SnapshotBeta) - c.Assert(ok, Equals, true) + c.Assert(ok, check.Equals, true) // set true err := snapshotterBeta.UpdateVolumeSnapshotStatusBeta(ctx, namespace, snapshotName, true) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) us, err := dynCli.Resource(v1beta1.VolSnapGVR).Namespace(namespace).Get(ctx, snapshotName, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) status, ok := us.Object["status"].(map[string]interface{}) - c.Assert(ok, Equals, true) - c.Assert(status["readyToUse"], Equals, true) + c.Assert(ok, check.Equals, true) + c.Assert(status["readyToUse"], check.Equals, true) // set false err = snapshotterBeta.UpdateVolumeSnapshotStatusBeta(ctx, namespace, snapshotName, false) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) us, err = dynCli.Resource(v1beta1.VolSnapGVR).Namespace(namespace).Get(ctx, snapshotName, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) status, ok = us.Object["status"].(map[string]interface{}) - c.Assert(ok, Equals, true) - c.Assert(status["readyToUse"], Equals, false) + c.Assert(ok, check.Equals, true) + c.Assert(status["readyToUse"], check.Equals, false) // status not set snapshotMeta = snapshot.ObjectMeta{ @@ -1303,12 +1303,12 @@ func (s *SnapshotTestSuite) TestCreateFromSourceBeta(c *C) { volSnap = snapshot.UnstructuredVolumeSnapshot(v1beta1.VolSnapGVR, "pvcName", snapshotClass, snapshotMeta, snapshotContentMeta) dynCli = dynfake.NewSimpleDynamicClient(scheme, volSnap) snapshotterBeta, ok = snapshot.NewSnapshotBeta(kubeCli, dynCli).(*snapshot.SnapshotBeta) - c.Assert(ok, Equals, true) + c.Assert(ok, check.Equals, true) err = snapshotterBeta.UpdateVolumeSnapshotStatusBeta(ctx, namespace, snapshotName, false) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (s *SnapshotTestSuite) TestCreateFromSource(c *C) { +func (s *SnapshotTestSuite) TestCreateFromSource(c *check.C) { ctx := context.Background() namespace := "namespace" existingSnapshotName := "existingSnapname" @@ -1378,15 +1378,15 @@ func (s *SnapshotTestSuite) TestCreateFromSource(c *C) { Annotations: annotations, } err := snapshoter.CreateFromSource(ctx, source, false, snapshotMeta, snapshot.ObjectMeta{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) foundSns, err := snapshoter.List(ctx, namespace, labels) - c.Assert(err, IsNil) - c.Assert(foundSns.Items, HasLen, 1) - c.Assert(foundSns.Items[0].Name, Equals, snapshotName) + c.Assert(err, check.IsNil) + 
c.Assert(foundSns.Items, check.HasLen, 1) + c.Assert(foundSns.Items[0].Name, check.Equals, snapshotName) } } -func (s *SnapshotTestSuite) TestCreateFromSourceStable(c *C) { +func (s *SnapshotTestSuite) TestCreateFromSourceStable(c *check.C) { ctx := context.Background() namespace := "namespace" snapshotName := "snapname" @@ -1410,25 +1410,25 @@ func (s *SnapshotTestSuite) TestCreateFromSourceStable(c *C) { kubeCli := fake.NewSimpleClientset() snapshotterStable, ok := snapshot.NewSnapshotStable(kubeCli, dynCli).(*snapshot.SnapshotStable) - c.Assert(ok, Equals, true) + c.Assert(ok, check.Equals, true) // set true err := snapshotterStable.UpdateVolumeSnapshotStatusStable(ctx, namespace, snapshotName, true) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) us, err := dynCli.Resource(snapshot.VolSnapGVR).Namespace(namespace).Get(ctx, snapshotName, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) status, ok := us.Object["status"].(map[string]interface{}) - c.Assert(ok, Equals, true) - c.Assert(status["readyToUse"], Equals, true) + c.Assert(ok, check.Equals, true) + c.Assert(status["readyToUse"], check.Equals, true) // set false err = snapshotterStable.UpdateVolumeSnapshotStatusStable(ctx, namespace, snapshotName, false) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) us, err = dynCli.Resource(snapshot.VolSnapGVR).Namespace(namespace).Get(ctx, snapshotName, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) status, ok = us.Object["status"].(map[string]interface{}) - c.Assert(ok, Equals, true) - c.Assert(status["readyToUse"], Equals, false) + c.Assert(ok, check.Equals, true) + c.Assert(status["readyToUse"], check.Equals, false) // status not set snapshotMeta = snapshot.ObjectMeta{ @@ -1443,12 +1443,12 @@ func (s *SnapshotTestSuite) TestCreateFromSourceStable(c *C) { snapshotMeta, snapshotContentMeta) dynCli = dynfake.NewSimpleDynamicClient(scheme, volSnap) snapshotterStable, ok = snapshot.NewSnapshotStable(kubeCli, dynCli).(*snapshot.SnapshotStable) - c.Assert(ok, Equals, true) + c.Assert(ok, check.Equals, true) err = snapshotterStable.UpdateVolumeSnapshotStatusStable(ctx, namespace, snapshotName, false) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (s *SnapshotTestSuite) TestGetSnapshotClassbyAnnotation(c *C) { +func (s *SnapshotTestSuite) TestGetSnapshotClassbyAnnotation(c *check.C) { ctx := context.Background() vsc1 := snapshot.UnstructuredVolumeSnapshotClass(v1beta1.VolSnapClassGVR, "vsc1", "driver", snapshot.DeletionPolicyDelete, nil) vsc1.SetAnnotations(map[string]string{ @@ -1479,7 +1479,7 @@ func (s *SnapshotTestSuite) TestGetSnapshotClassbyAnnotation(c *C) { key string value string sc string - errChecker Checker + errChecker check.Checker retVSC string }{ { @@ -1489,7 +1489,7 @@ func (s *SnapshotTestSuite) TestGetSnapshotClassbyAnnotation(c *C) { key: "key", value: "value", sc: "sc1", - errChecker: IsNil, + errChecker: check.IsNil, retVSC: "vsc1", }, { // no vsc available @@ -1499,7 +1499,7 @@ func (s *SnapshotTestSuite) TestGetSnapshotClassbyAnnotation(c *C) { key: "key", value: "value", sc: "sc1", - errChecker: NotNil, + errChecker: check.NotNil, }, { // annotation on sc dyncli: dynfake.NewSimpleDynamicClient(scheme, vsc2), @@ -1508,7 +1508,7 @@ func (s *SnapshotTestSuite) TestGetSnapshotClassbyAnnotation(c *C) { key: "key", value: "value", sc: "sc2", - errChecker: IsNil, + errChecker: check.IsNil, retVSC: "vsc2", }, { // missing vsc @@ -1518,22 +1518,22 @@ func (s *SnapshotTestSuite) TestGetSnapshotClassbyAnnotation(c 
*C) { key: "key", value: "value", sc: "sc2", - errChecker: NotNil, + errChecker: check.NotNil, }, } { vsc, err := snapshot.GetSnapshotClassbyAnnotation(ctx, tc.dyncli, tc.kubecli, tc.gvr, tc.key, tc.value, tc.sc) c.Check(err, tc.errChecker) - if tc.errChecker == IsNil { - c.Assert(vsc, Equals, tc.retVSC) + if tc.errChecker == check.IsNil { + c.Assert(vsc, check.Equals, tc.retVSC) } } } type SnapshotLocalTestSuite struct{} -var _ = Suite(&SnapshotLocalTestSuite{}) +var _ = check.Suite(&SnapshotLocalTestSuite{}) -func (s *SnapshotLocalTestSuite) TestLabels(c *C) { +func (s *SnapshotLocalTestSuite) TestLabels(c *check.C) { ctx := context.Background() scheme := runtime.NewScheme() scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Kind: "VolumeSnapshotList"}, &unstructured.UnstructuredList{}) @@ -1548,7 +1548,7 @@ func (s *SnapshotLocalTestSuite) TestLabels(c *C) { dynCli dynamic.Interface createLabels map[string]string listLabel map[string]string - errChecker Checker + errChecker check.Checker numResults int }{ { @@ -1559,7 +1559,7 @@ func (s *SnapshotLocalTestSuite) TestLabels(c *C) { listLabel: map[string]string{ "label": "1/2/3", }, - errChecker: IsNil, + errChecker: check.IsNil, numResults: 1, }, { // nothing that matches label @@ -1568,7 +1568,7 @@ func (s *SnapshotLocalTestSuite) TestLabels(c *C) { listLabel: map[string]string{ "label": "1", }, - errChecker: IsNil, + errChecker: check.IsNil, numResults: 0, }, { // empty labels list everytime @@ -1577,12 +1577,12 @@ func (s *SnapshotLocalTestSuite) TestLabels(c *C) { "label": "1", }, listLabel: map[string]string{}, - errChecker: IsNil, + errChecker: check.IsNil, numResults: 1, }, { // nil lists dynCli: dynfake.NewSimpleDynamicClient(scheme), - errChecker: IsNil, + errChecker: check.IsNil, numResults: 1, }, } { @@ -1601,13 +1601,13 @@ func (s *SnapshotLocalTestSuite) TestLabels(c *C) { err = fakeSs.Create(ctx, volName, &snapClass, false, snapshotMeta) if err == nil { list, err = fakeSs.List(ctx, ns, tc.listLabel) - c.Assert(len(list.Items), Equals, tc.numResults) + c.Assert(len(list.Items), check.Equals, tc.numResults) } c.Check(err, tc.errChecker) } } } -func (s *SnapshotLocalTestSuite) TestAnnotations(c *C) { +func (s *SnapshotLocalTestSuite) TestAnnotations(c *check.C) { ctx := context.Background() scheme := runtime.NewScheme() scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Kind: "VolumeSnapshotList"}, &unstructured.UnstructuredList{}) @@ -1621,19 +1621,19 @@ func (s *SnapshotLocalTestSuite) TestAnnotations(c *C) { for _, tc := range []struct { dynCli dynamic.Interface snapshotAnnotations map[string]string - errChecker Checker + errChecker check.Checker }{ { dynCli: dynfake.NewSimpleDynamicClient(scheme), snapshotAnnotations: map[string]string{ "annotationtest": "true", }, - errChecker: IsNil, + errChecker: check.IsNil, }, { // empty annotations list dynCli: dynfake.NewSimpleDynamicClient(scheme), snapshotAnnotations: map[string]string{}, - errChecker: IsNil, + errChecker: check.IsNil, }, { // annotations list matches dynCli: dynfake.NewSimpleDynamicClient(scheme), @@ -1641,11 +1641,11 @@ func (s *SnapshotLocalTestSuite) TestAnnotations(c *C) { "annotationtest": "true", "annotationtest1": "false", }, - errChecker: IsNil, + errChecker: check.IsNil, }, { // nil lists dynCli: dynfake.NewSimpleDynamicClient(scheme), - errChecker: IsNil, + errChecker: check.IsNil, }, } { for _, fakeSs := range []snapshot.Snapshotter{ @@ 
-1664,8 +1664,8 @@ func (s *SnapshotLocalTestSuite) TestAnnotations(c *C) { if err == nil { vs, err = fakeSs.Get(ctx, snapName, ns) annotation := vs.GetAnnotations() - c.Assert(len(annotation), Equals, len(tc.snapshotAnnotations)) - c.Assert(annotation, DeepEquals, tc.snapshotAnnotations) + c.Assert(len(annotation), check.Equals, len(tc.snapshotAnnotations)) + c.Assert(annotation, check.DeepEquals, tc.snapshotAnnotations) } c.Check(err, tc.errChecker) } @@ -1682,12 +1682,12 @@ func fakePVC(name, namespace string) *corev1.PersistentVolumeClaim { type SnapshotTransformUnstructuredTestSuite struct{} -var _ = Suite(&SnapshotTransformUnstructuredTestSuite{}) +var _ = check.Suite(&SnapshotTransformUnstructuredTestSuite{}) -func (s *SnapshotTransformUnstructuredTestSuite) TestNilUnstructured(c *C) { +func (s *SnapshotTransformUnstructuredTestSuite) TestNilUnstructured(c *check.C) { err := snapshot.TransformUnstructured(nil, nil) - c.Check(err, ErrorMatches, "Cannot deserialize nil unstructured") + c.Check(err, check.ErrorMatches, "Cannot deserialize nil unstructured") u := &unstructured.Unstructured{} err = snapshot.TransformUnstructured(u, nil) - c.Check(err, ErrorMatches, "Failed to Unmarshal unstructured object: json: Unmarshal\\(nil\\)") + c.Check(err, check.ErrorMatches, "Failed to Unmarshal unstructured object: json: Unmarshal\\(nil\\)") } diff --git a/pkg/kube/statefulset_test.go b/pkg/kube/statefulset_test.go index 56c89ccd20..363eb8a197 100644 --- a/pkg/kube/statefulset_test.go +++ b/pkg/kube/statefulset_test.go @@ -22,7 +22,7 @@ import ( "fmt" "strings" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -33,12 +33,12 @@ type StatefulSetSuite struct { namespace string } -var _ = Suite(&StatefulSetSuite{}) +var _ = check.Suite(&StatefulSetSuite{}) -func (s *StatefulSetSuite) SetUpSuite(c *C) { +func (s *StatefulSetSuite) SetUpSuite(c *check.C) { c.Skip("Too slow") cli, err := NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -46,14 +46,14 @@ func (s *StatefulSetSuite) SetUpSuite(c *C) { }, } cns, err := s.cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name } -func (s *StatefulSetSuite) TearDownSuite(c *C) { +func (s *StatefulSetSuite) TearDownSuite(c *check.C) { if s.namespace != "" { err := s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } @@ -80,18 +80,18 @@ spec: args: ["-f", "/dev/null"] ` -func (s *StatefulSetSuite) TestCreateStatefulSet(c *C) { +func (s *StatefulSetSuite) TestCreateStatefulSet(c *check.C) { ctx := context.Background() // Stateful set names have strict requirements. 
name := strings.ToLower(c.TestName()) name = strings.Replace(name, ".", "", 1) spec := fmt.Sprintf(ssSpec, name) _, err := CreateStatefulSet(ctx, s.cli, s.namespace, spec) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err = s.cli.AppsV1().StatefulSets(s.namespace).Delete(ctx, name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() _, err = s.cli.CoreV1().Pods(s.namespace).Get(ctx, name+"-0", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } diff --git a/pkg/kube/unstructured_test.go b/pkg/kube/unstructured_test.go index 5b935f3ac2..372ac12a63 100644 --- a/pkg/kube/unstructured_test.go +++ b/pkg/kube/unstructured_test.go @@ -19,7 +19,7 @@ import ( "context" "text/template" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "k8s.io/apimachinery/pkg/runtime/schema" "github.com/kanisterio/kanister/pkg/ksprig" @@ -27,13 +27,13 @@ import ( type UnstructuredSuite struct{} -var _ = Suite(&UnstructuredSuite{}) +var _ = check.Suite(&UnstructuredSuite{}) type Param struct { Unstructured map[string]interface{} } -func (s *UnstructuredSuite) TestFetch(c *C) { +func (s *UnstructuredSuite) TestFetch(c *check.C) { ctx := context.Background() gvr := schema.GroupVersionResource{ Group: "", @@ -41,7 +41,7 @@ func (s *UnstructuredSuite) TestFetch(c *C) { Resource: "services", } u, err := FetchUnstructuredObject(ctx, gvr, "default", "kubernetes") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) buf := bytes.NewBuffer(nil) tp := Param{Unstructured: u.UnstructuredContent()} @@ -53,9 +53,9 @@ func (s *UnstructuredSuite) TestFetch(c *C) { {"{{ .Unstructured.spec.clusterIP }}"}, } { t, err := template.New("config").Option("missingkey=error").Funcs(ksprig.TxtFuncMap()).Parse(tc.arg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = t.Execute(buf, tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) c.Logf("Template: %s, Value: %s", tc.arg, buf.String()) } diff --git a/pkg/kube/volume/volume_test.go b/pkg/kube/volume/volume_test.go index a0c0532b4c..46330f870a 100644 --- a/pkg/kube/volume/volume_test.go +++ b/pkg/kube/volume/volume_test.go @@ -21,7 +21,7 @@ import ( "reflect" "testing" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,13 +32,13 @@ import ( "k8s.io/client-go/kubernetes/fake" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type TestVolSuite struct{} -var _ = Suite(&TestVolSuite{}) +var _ = check.Suite(&TestVolSuite{}) -func (s *TestVolSuite) TestCreatePVC(c *C) { +func (s *TestVolSuite) TestCreatePVC(c *check.C) { // Create PVC ctx := context.Background() pvcSize := int64(1024) @@ -47,34 +47,34 @@ func (s *TestVolSuite) TestCreatePVC(c *C) { annotations := map[string]string{"a1": "foo"} cli := fake.NewSimpleClientset() pvcName, err := CreatePVC(ctx, cli, ns, NoPVCNameSpecified, pvcSize, targetVolID, annotations, []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, nil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pvc, err := cli.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Assert(len(pvc.Spec.AccessModes) >= 1, Equals, true) + c.Assert(len(pvc.Spec.AccessModes) >= 1, check.Equals, true) accessMode := pvc.Spec.AccessModes[0] - c.Assert(accessMode, Equals, corev1.ReadWriteOnce) + c.Assert(accessMode, check.Equals, corev1.ReadWriteOnce) capacity, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage] - c.Assert(ok, Equals, true) - c.Assert(capacity.Value() >= int64(pvcSize), Equals, true) + c.Assert(ok, check.Equals, true) + c.Assert(capacity.Value() >= int64(pvcSize), check.Equals, true) eq := reflect.DeepEqual(annotations, pvc.ObjectMeta.Annotations) - c.Assert(eq, Equals, true) - c.Assert(len(pvc.Spec.Selector.MatchLabels) >= 1, Equals, true) + c.Assert(eq, check.Equals, true) + c.Assert(len(pvc.Spec.Selector.MatchLabels) >= 1, check.Equals, true) label := pvc.Spec.Selector.MatchLabels[pvMatchLabelName] - c.Assert(label, Equals, filepath.Base(targetVolID)) + c.Assert(label, check.Equals, filepath.Base(targetVolID)) volumeMode := corev1.PersistentVolumeBlock _, err = CreatePVC(ctx, cli, ns, "pvc2", pvcSize, targetVolID, annotations, nil, &volumeMode) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pvc2, err := cli.CoreV1().PersistentVolumeClaims(ns).Get(ctx, "pvc2", metav1.GetOptions{}) - c.Assert(err, IsNil) - c.Assert(len(pvc2.Spec.AccessModes) >= 1, Equals, true) - c.Assert(*pvc2.Spec.VolumeMode, Equals, corev1.PersistentVolumeBlock) - c.Assert(pvc2.GetAnnotations(), NotNil) - c.Assert(pvc2.GetAnnotations()["a1"], Equals, "foo") + c.Assert(err, check.IsNil) + c.Assert(len(pvc2.Spec.AccessModes) >= 1, check.Equals, true) + c.Assert(*pvc2.Spec.VolumeMode, check.Equals, corev1.PersistentVolumeBlock) + c.Assert(pvc2.GetAnnotations(), check.NotNil) + c.Assert(pvc2.GetAnnotations()["a1"], check.Equals, "foo") } -func (s *TestVolSuite) TestGetPVCRestoreSize(c *C) { +func (s *TestVolSuite) TestGetPVCRestoreSize(c *check.C) { ctx := context.Background() scheme := runtime.NewScheme() scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1", Kind: "VolumeSnapshotList"}, &unstructured.UnstructuredList{}) @@ -89,7 +89,7 @@ func (s *TestVolSuite) TestGetPVCRestoreSize(c *C) { for _, tc := range []struct { args *CreatePVCFromSnapshotArgs sizeValue int64 - errChecker Checker + errChecker check.Checker }{ { // only snapshot restore size args: &CreatePVCFromSnapshotArgs{ @@ -100,7 +100,7 @@ func (s *TestVolSuite) TestGetPVCRestoreSize(c *C) { Namespace: "vsNamespace", }, 
sizeValue: 10737418240, - errChecker: IsNil, + errChecker: check.IsNil, }, { // only args restore size args: &CreatePVCFromSnapshotArgs{ @@ -112,7 +112,7 @@ func (s *TestVolSuite) TestGetPVCRestoreSize(c *C) { RestoreSize: "10Gi", }, sizeValue: 10737418240, - errChecker: IsNil, + errChecker: check.IsNil, }, { // neither args: &CreatePVCFromSnapshotArgs{ @@ -122,7 +122,7 @@ func (s *TestVolSuite) TestGetPVCRestoreSize(c *C) { SnapshotName: "vsName", Namespace: "vsNamespace", }, - errChecker: NotNil, + errChecker: check.NotNil, }, { // both, snapshot size is bigger args: &CreatePVCFromSnapshotArgs{ @@ -134,7 +134,7 @@ func (s *TestVolSuite) TestGetPVCRestoreSize(c *C) { RestoreSize: "9Gi", }, sizeValue: 10737418240, - errChecker: IsNil, + errChecker: check.IsNil, }, { // both, args size is bigger args: &CreatePVCFromSnapshotArgs{ @@ -146,7 +146,7 @@ func (s *TestVolSuite) TestGetPVCRestoreSize(c *C) { RestoreSize: "10Gi", }, sizeValue: 10737418240, - errChecker: IsNil, + errChecker: check.IsNil, }, { // Failed to find snapshot args: &CreatePVCFromSnapshotArgs{ @@ -155,7 +155,7 @@ func (s *TestVolSuite) TestGetPVCRestoreSize(c *C) { SnapshotName: "vsName", Namespace: "vsNamespace", }, - errChecker: NotNil, + errChecker: check.NotNil, }, { // Failed to create snapshotter args: &CreatePVCFromSnapshotArgs{ @@ -164,7 +164,7 @@ func (s *TestVolSuite) TestGetPVCRestoreSize(c *C) { SnapshotName: "vsName", Namespace: "vsNamespace", }, - errChecker: NotNil, + errChecker: check.NotNil, }, { // bad args restore size args: &CreatePVCFromSnapshotArgs{ @@ -172,13 +172,13 @@ func (s *TestVolSuite) TestGetPVCRestoreSize(c *C) { Namespace: "vsNamespace", RestoreSize: "10wut", }, - errChecker: NotNil, + errChecker: check.NotNil, }, } { q, err := getPVCRestoreSize(ctx, tc.args) c.Assert(err, tc.errChecker) - if tc.errChecker == IsNil { - c.Assert(q.Value(), Equals, tc.sizeValue) + if tc.errChecker == check.IsNil { + c.Assert(q.Value(), check.Equals, tc.sizeValue) } } } @@ -202,7 +202,7 @@ func (s *TestVolSuite) fakeUnstructuredSnasphotWSize(vsName, namespace, size str return &unstructured.Unstructured{Object: Object} } -func (s *TestVolSuite) TestZoneToRegion(c *C) { +func (s *TestVolSuite) TestZoneToRegion(c *check.C) { for idx, tc := range []struct { zone string expectedRegion []string @@ -237,6 +237,6 @@ func (s *TestVolSuite) TestZoneToRegion(c *C) { }, } { reg := zonesToRegions(tc.zone) - c.Assert(reg, DeepEquals, tc.expectedRegion, Commentf("Case #%d", idx)) + c.Assert(reg, check.DeepEquals, tc.expectedRegion, check.Commentf("Case #%d", idx)) } } diff --git a/pkg/kube/workload_ready_test.go b/pkg/kube/workload_ready_test.go index 0102e1ff7a..ce45d76a2c 100644 --- a/pkg/kube/workload_ready_test.go +++ b/pkg/kube/workload_ready_test.go @@ -4,7 +4,7 @@ import ( "context" "time" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -18,7 +18,7 @@ import ( type WorkloadReadySuite struct{} -var _ = Suite(&WorkloadReadySuite{}) +var _ = check.Suite(&WorkloadReadySuite{}) type cliParams struct { name string @@ -34,7 +34,7 @@ type cliParams struct { } // These tests can be used to force the various error states -func (s *WorkloadReadySuite) TestWaitOnStatefulSetReady(c *C) { +func (s *WorkloadReadySuite) TestWaitOnStatefulSetReady(c *check.C) { testCases := []struct { input cliParams want string @@ -57,21 +57,21 @@ func (s *WorkloadReadySuite) TestWaitOnStatefulSetReady(c *C) { if tc.want != "" { errorchecker.AssertErrorMessage(c, err, tc.want) } else { - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } } -func (s *WorkloadReadySuite) TestStatefulSetReady(c *C) { +func (s *WorkloadReadySuite) TestStatefulSetReady(c *check.C) { cp := cliParams{"ss", "default", true, 1, 1, 1, 1, 1, 1, "Running"} ctx := context.Background() ready, status, err := StatefulSetReady(ctx, getCli(cp), cp.namespace, cp.name) - c.Assert(status, DeepEquals, "") - c.Assert(ready, DeepEquals, true) - c.Assert(err, IsNil) + c.Assert(status, check.DeepEquals, "") + c.Assert(ready, check.DeepEquals, true) + c.Assert(err, check.IsNil) } -func (s *WorkloadReadySuite) TestWaitOnDeploymentReady(c *C) { +func (s *WorkloadReadySuite) TestWaitOnDeploymentReady(c *check.C) { testCases := []struct { input cliParams want string @@ -103,18 +103,18 @@ func (s *WorkloadReadySuite) TestWaitOnDeploymentReady(c *C) { if tc.want != "" { errorchecker.AssertErrorMessage(c, err, tc.want) } else { - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } } -func (s *WorkloadReadySuite) TestDeploymentReady(c *C) { +func (s *WorkloadReadySuite) TestDeploymentReady(c *check.C) { cp := cliParams{"dep", "default", false, 1, 1, 1, 1, 1, 1, "Running"} ctx := context.Background() ready, status, err := DeploymentReady(ctx, getCli(cp), cp.namespace, cp.name) - c.Assert(ready, DeepEquals, true) - c.Assert(status, DeepEquals, "") - c.Assert(err, IsNil) + c.Assert(ready, check.DeepEquals, true) + c.Assert(status, check.DeepEquals, "") + c.Assert(err, check.IsNil) } // Returns a fake k8s cli that contains a Deployment, ReplicaSet or StatefulSet, and Pod diff --git a/pkg/kube/workload_test.go b/pkg/kube/workload_test.go index f58050520e..d78bfc3f0d 100644 --- a/pkg/kube/workload_test.go +++ b/pkg/kube/workload_test.go @@ -5,7 +5,7 @@ import ( osapps "github.com/openshift/api/apps/v1" osversioned "github.com/openshift/client-go/apps/clientset/versioned" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -13,19 +13,19 @@ import ( type WorkloadSuite struct{} -var _ = Suite(&WorkloadSuite{}) +var _ = check.Suite(&WorkloadSuite{}) -func (s *WorkloadSuite) TestScaleDeploymentConfig(c *C) { +func (s *WorkloadSuite) TestScaleDeploymentConfig(c *check.C) { // Get K8s client cfg, err := LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := kubernetes.NewForConfig(cfg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check if we're in OpenShift ctx := context.Background() ok, err := IsOSAppsGroupAvailable(ctx, cli.Discovery()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) if !ok { c.Skip("Skipping test since this only runs on OpenShift") } @@ -37,23 +37,23 @@ func (s *WorkloadSuite) TestScaleDeploymentConfig(c *C) { }, } ns, err = cli.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err = cli.CoreV1().Namespaces().Delete(context.TODO(), ns.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() // Create simple DeploymentConfig dc := newDeploymentConfig() osCli, err := osversioned.NewForConfig(cfg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) dc, err = osCli.AppsV1().DeploymentConfigs(ns.GetName()).Create(ctx, dc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = ScaleDeploymentConfig(ctx, cli, osCli, dc.GetNamespace(), dc.GetName(), 0, true) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = ScaleDeploymentConfig(ctx, cli, osCli, dc.GetNamespace(), dc.GetName(), 1, true) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } func newDeploymentConfig() *osapps.DeploymentConfig { diff --git a/pkg/location/location_test.go b/pkg/location/location_test.go index b7ab4d4099..a61267598f 100644 --- a/pkg/location/location_test.go +++ b/pkg/location/location_test.go @@ -24,7 +24,7 @@ import ( "testing" "time" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -36,7 +36,7 @@ import ( ) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type LocationSuite struct { osType objectstore.ProviderType @@ -55,11 +55,11 @@ const ( testRegionS3 = "us-west-2" ) -var _ = Suite(&LocationSuite{osType: objectstore.ProviderTypeS3, region: testRegionS3}) -var _ = Suite(&LocationSuite{osType: objectstore.ProviderTypeGCS, region: ""}) -var _ = Suite(&LocationSuite{osType: objectstore.ProviderTypeAzure, region: ""}) +var _ = check.Suite(&LocationSuite{osType: objectstore.ProviderTypeS3, region: testRegionS3}) +var _ = check.Suite(&LocationSuite{osType: objectstore.ProviderTypeGCS, region: ""}) +var _ = check.Suite(&LocationSuite{osType: objectstore.ProviderTypeAzure, region: ""}) -func (s *LocationSuite) SetUpSuite(c *C) { +func (s *LocationSuite) SetUpSuite(c *check.C) { var location crv1alpha1.Location switch s.osType { case objectstore.ProviderTypeS3: @@ -94,22 +94,22 @@ func (s *LocationSuite) SetUpSuite(c *C) { Region: s.region, } secret, err := getOSSecret(ctx, s.osType, s.profile.Credential) - c.Check(err, IsNil) + c.Check(err, check.IsNil) s.provider, err = objectstore.NewProvider(ctx, pc, secret) - c.Check(err, IsNil) - c.Assert(s.provider, NotNil) + c.Check(err, check.IsNil) + c.Assert(s.provider, check.NotNil) s.root, err = objectstore.GetOrCreateBucket(ctx, s.provider, testBucketName) - c.Check(err, IsNil) - c.Assert(s.root, NotNil) + c.Check(err, check.IsNil) + c.Assert(s.root, check.NotNil) s.suiteDirPrefix = time.Now().UTC().Format(time.RFC3339Nano) s.testpath = s.suiteDirPrefix + "/testlocation.txt" s.testMultipartPath = s.suiteDirPrefix + "/testchunk.txt" } -func (s *LocationSuite) TearDownTest(c *C) { +func (s *LocationSuite) TearDownTest(c *check.C) { if s.testpath != "" { - c.Assert(s.root, NotNil) + c.Assert(s.root, check.NotNil) ctx := context.Background() err := s.root.Delete(ctx, s.testpath) if err != nil { @@ -117,7 +117,7 @@ func (s *LocationSuite) TearDownTest(c *C) { } } if s.testMultipartPath != "" { - c.Assert(s.root, NotNil) + c.Assert(s.root, check.NotNil) ctx := context.Background() err := s.root.Delete(ctx, s.testMultipartPath) if err != nil { @@ -126,18 +126,18 @@ func (s *LocationSuite) TearDownTest(c *C) { } } -func (s *LocationSuite) TestWriteAndReadData(c *C) { +func (s *LocationSuite) TestWriteAndReadData(c *check.C) { ctx := context.Background() teststring := "test-content-check" err := writeData(ctx, s.osType, s.profile, bytes.NewBufferString(teststring), s.testpath) - c.Check(err, IsNil) + c.Check(err, check.IsNil) buf := bytes.NewBuffer(nil) err = readData(ctx, s.osType, s.profile, buf, s.testpath) - c.Check(err, IsNil) - c.Check(buf.String(), Equals, teststring) + c.Check(err, check.IsNil) + c.Check(buf.String(), check.Equals, teststring) } -func (s *LocationSuite) TestAzMultipartUpload(c *C) { +func (s *LocationSuite) TestAzMultipartUpload(c *check.C) { if s.osType != objectstore.ProviderTypeAzure { c.Skip(fmt.Sprintf("Not applicable for location type %s", s.osType)) } @@ -146,14 +146,14 @@ func (s *LocationSuite) TestAzMultipartUpload(c *C) { _, err := os.Stat(s.suiteDirPrefix) if os.IsNotExist(err) { err := os.MkdirAll(s.suiteDirPrefix, 0755) - c.Check(err, IsNil) + c.Check(err, check.IsNil) } // Create test file f, err := os.Create(s.testMultipartPath) - c.Check(err, IsNil) + c.Check(err, check.IsNil) defer func() { err = f.Close() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() ctx := context.Background() for _, fileSize := range []int64{ @@ -165,21 +165,21 @@ func (s *LocationSuite) 
TestAzMultipartUpload(c *C) { 300 * 1024 * 1024, // 300M ie > buffSize } { _, err := f.Seek(0, io.SeekStart) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Create dump file err = os.Truncate(s.testMultipartPath, fileSize) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = writeData(ctx, s.osType, s.profile, f, s.testMultipartPath) - c.Check(err, IsNil) + c.Check(err, check.IsNil) buf := bytes.NewBuffer(nil) err = readData(ctx, s.osType, s.profile, buf, s.testMultipartPath) - c.Check(err, IsNil) - c.Check(int64(buf.Len()), Equals, fileSize) + c.Check(err, check.IsNil) + c.Check(int64(buf.Len()), check.Equals, fileSize) } } -func (s *LocationSuite) TestReaderSize(c *C) { +func (s *LocationSuite) TestReaderSize(c *check.C) { for _, tc := range []struct { input string buffSize int64 @@ -207,16 +207,16 @@ func (s *LocationSuite) TestReaderSize(c *C) { }, } { _, size, err := readerSize(bytes.NewBufferString(tc.input), tc.buffSize) - c.Assert(err, IsNil) - c.Assert(size, Equals, tc.expectedSize) + c.Assert(err, check.IsNil) + c.Assert(size, check.Equals, tc.expectedSize) } } -func (s *LocationSuite) TestGetAzureSecret(c *C) { +func (s *LocationSuite) TestGetAzureSecret(c *check.C) { for _, tc := range []struct { cred param.Credential retAzSecret *objectstore.SecretAzure - errChecker Checker + errChecker check.Checker }{ { cred: param.Credential{ @@ -238,7 +238,7 @@ func (s *LocationSuite) TestGetAzureSecret(c *C) { StorageAccount: "id", StorageKey: "secret", }, - errChecker: IsNil, + errChecker: check.IsNil, }, { cred: param.Credential{ @@ -261,7 +261,7 @@ func (s *LocationSuite) TestGetAzureSecret(c *C) { StorageKey: "sakey", EnvironmentName: "env", }, - errChecker: IsNil, + errChecker: check.IsNil, }, { // missing required field cred: param.Credential{ @@ -283,7 +283,7 @@ func (s *LocationSuite) TestGetAzureSecret(c *C) { StorageKey: "sakey", EnvironmentName: "env", }, - errChecker: NotNil, + errChecker: check.NotNil, }, { // additional incorrect field cred: param.Credential{ @@ -307,15 +307,15 @@ func (s *LocationSuite) TestGetAzureSecret(c *C) { StorageKey: "sakey", EnvironmentName: "env", }, - errChecker: NotNil, + errChecker: check.NotNil, }, } { secret, err := getAzureSecret(tc.cred) c.Assert(err, tc.errChecker) - if tc.errChecker == IsNil { - c.Assert(secret.Azure.StorageKey, Equals, tc.retAzSecret.StorageKey) - c.Assert(secret.Azure.StorageAccount, Equals, tc.retAzSecret.StorageAccount) - c.Assert(secret.Azure.EnvironmentName, Equals, tc.retAzSecret.EnvironmentName) + if tc.errChecker == check.IsNil { + c.Assert(secret.Azure.StorageKey, check.Equals, tc.retAzSecret.StorageKey) + c.Assert(secret.Azure.StorageAccount, check.Equals, tc.retAzSecret.StorageAccount) + c.Assert(secret.Azure.EnvironmentName, check.Equals, tc.retAzSecret.EnvironmentName) } } } diff --git a/pkg/log/fluentbit_test.go b/pkg/log/fluentbit_test.go index 638a5a2a9d..3b1f5464cb 100644 --- a/pkg/log/fluentbit_test.go +++ b/pkg/log/fluentbit_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/sirupsen/logrus" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" ) const ( @@ -18,9 +18,9 @@ const ( type FluentbitSuite struct{} -var _ = Suite(&FluentbitSuite{}) +var _ = check.Suite(&FluentbitSuite{}) -func (s *FluentbitSuite) TestSendLogsServerRunning(c *C) { +func (s *FluentbitSuite) TestSendLogsServerRunning(c *check.C) { end := make(chan bool) // Fake Fluentbit go runServer(0, end, c) @@ -34,7 +34,7 @@ func (s *FluentbitSuite) TestSendLogsServerRunning(c *C) { } } -func (s *FluentbitSuite) TestSendLogsServerFailedInTheMiddle(c *C) { +func (s *FluentbitSuite) TestSendLogsServerFailedInTheMiddle(c *check.C) { c.Logf("Error messages are expected in this test") end := make(chan bool) @@ -47,7 +47,7 @@ func (s *FluentbitSuite) TestSendLogsServerFailedInTheMiddle(c *C) { } } -func (s *FluentbitSuite) TestSendLogsServerUnavailableFromStart(c *C) { +func (s *FluentbitSuite) TestSendLogsServerUnavailableFromStart(c *check.C) { c.Logf("Error messages are expected in this test") h := NewFluentbitHook(fakeEndPoint) @@ -59,7 +59,7 @@ func (s *FluentbitSuite) TestSendLogsServerUnavailableFromStart(c *C) { } } -func runServer(failAfterNLogs int, endFlag chan<- bool, c *C) { +func runServer(failAfterNLogs int, endFlag chan<- bool, c *check.C) { result := make([]string, 0) t := time.NewTimer(10 * time.Second) defer close(endFlag) @@ -67,7 +67,7 @@ func runServer(failAfterNLogs int, endFlag chan<- bool, c *C) { l := resolveAndListen(fakeEndPoint, c) defer func() { err := l.Close() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() Loop: @@ -76,7 +76,7 @@ Loop: case <-t.C: // Success condition is that the server // processed at least 1 message in 5 sec - c.Assert(result, Not(HasLen), 0) + c.Assert(result, check.Not(check.HasLen), 0) break Loop default: // or it processed all of them under 5 sec. @@ -90,11 +90,11 @@ Loop: continue } bytes, rerr := io.ReadAll(conn) - c.Assert(rerr, IsNil) + c.Assert(rerr, check.IsNil) strs := strings.Split(strings.Trim(string(bytes), "\n"), "\n") result = append(result, strs...) - c.Assert(conn.Close(), IsNil) + c.Assert(conn.Close(), check.IsNil) if failAfterNLogs != 0 && len(result) > failAfterNLogs { c.Logf("Server is failed as expected after %d logs", failAfterNLogs) break @@ -103,11 +103,11 @@ Loop: c.Logf("Server: Received %d of total %d logs", len(result), numMsgs) } -func resolveAndListen(endpoint string, c *C) *net.TCPListener { +func resolveAndListen(endpoint string, c *check.C) *net.TCPListener { addr, err := net.ResolveTCPAddr("tcp", endpoint) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) l, err := net.ListenTCP("tcp", addr) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) return l } diff --git a/pkg/log/log.go b/pkg/log/log.go index d40f22f4b4..fcd1932cb0 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "io" + "net/url" "os" "strings" "time" @@ -32,6 +33,12 @@ const ( LevelEnvName = "LOG_LEVEL" ) +var ( + ErrEndpointNotSet = errors.New("fluentbit endpoint not set") + ErrNonTCPEndpoint = errors.New("fluentbit endpoint scheme must be tcp") + ErrPathSet = errors.New("fluentbit endpoint path is set") +) + // OutputSink describes the current output sink. 
type OutputSink uint8 @@ -79,6 +86,22 @@ func SetOutput(sink OutputSink) error { } } +// SetFluentbitOutput sets the fluentbit output +func SetFluentbitOutput(url *url.URL) error { + if url == nil || url.Host == "" { + return ErrEndpointNotSet + } + if url.Scheme != "tcp" { + return ErrNonTCPEndpoint + } + if url.Path != "" { + return ErrPathSet + } + hook := NewFluentbitHook(url.Host) + log.AddHook(hook) + return nil +} + var envVarFields field.Fields // initEnvVarFields populates envVarFields with values from the host's environment. diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go index a372911f79..838942d7eb 100644 --- a/pkg/log/log_test.go +++ b/pkg/log/log_test.go @@ -6,13 +6,14 @@ import ( "encoding/json" "errors" "fmt" + "net/url" "os" "sync" "testing" "time" "github.com/sirupsen/logrus" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/field" ) @@ -23,80 +24,80 @@ const ( type LogSuite struct{} -var _ = Suite(&LogSuite{}) +var _ = check.Suite(&LogSuite{}) func Test(t *testing.T) { - TestingT(t) + check.TestingT(t) } -func (s *LogSuite) TestWithNilError(c *C) { +func (s *LogSuite) TestWithNilError(c *check.C) { log.SetFormatter(&logrus.JSONFormatter{TimestampFormat: time.RFC3339Nano}) // Should not panic WithError(nil).Print("Message") } -func (s *LogSuite) TestWithNilContext(c *C) { +func (s *LogSuite) TestWithNilContext(c *check.C) { log.SetFormatter(&logrus.JSONFormatter{TimestampFormat: time.RFC3339Nano}) // Should not panic WithContext(nil).Print("Message") //nolint:staticcheck } -func (s *LogSuite) TestLogMessage(c *C) { +func (s *LogSuite) TestLogMessage(c *check.C) { const text = "Some useful text." testLogMessage(c, text, Print) } -func (s *LogSuite) TestLogWithFields(c *C) { +func (s *LogSuite) TestLogWithFields(c *check.C) { const text = "Some useful text." 
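A minimal sketch of how a caller could wire up the SetFluentbitOutput helper added above; the endpoint address and surrounding program are illustrative, not part of this patch:

package main

import (
	"net/url"

	"github.com/kanisterio/kanister/pkg/log"
)

func main() {
	// SetFluentbitOutput only accepts tcp:// URLs with a host and no path,
	// matching the validation added in the hunk above. The address here is
	// a placeholder.
	endpoint := &url.URL{Scheme: "tcp", Host: "fluentbit.logging.svc:24224"}
	if err := log.SetFluentbitOutput(endpoint); err != nil {
		panic(err)
	}
	log.Print("log lines are now also forwarded to fluentbit")
}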
entry := testLogMessage(c, text, Print, field.M{"key": "value"}) - c.Assert(entry["level"], Equals, infoLevelStr) + c.Assert(entry["level"], check.Equals, infoLevelStr) // Error should not be set in the log entry - c.Assert(entry["error"], Equals, nil) + c.Assert(entry["error"], check.Equals, nil) // A field with "key" should be set in the log entry - c.Assert(entry["key"], Equals, "value") + c.Assert(entry["key"], check.Equals, "value") } -func (s *LogSuite) TestLogWithError(c *C) { +func (s *LogSuite) TestLogWithError(c *check.C) { const text = "My error message" err := errors.New("test error") entry := testLogMessage(c, text, WithError(err).Print) - c.Assert(entry["error"], Equals, err.Error()) - c.Assert(entry["level"], Equals, infoLevelStr) + c.Assert(entry["error"], check.Equals, err.Error()) + c.Assert(entry["level"], check.Equals, infoLevelStr) } -func (s *LogSuite) TestLogWithContext(c *C) { +func (s *LogSuite) TestLogWithContext(c *check.C) { const text = "My error message" ctx := context.Background() entry := testLogMessage(c, text, WithContext(ctx).Print) - c.Assert(entry["level"], Equals, infoLevelStr) + c.Assert(entry["level"], check.Equals, infoLevelStr) // Error should not be set in the log entry - c.Assert(entry["error"], Equals, nil) + c.Assert(entry["error"], check.Equals, nil) } -func (s *LogSuite) TestLogWithContextFields(c *C) { +func (s *LogSuite) TestLogWithContextFields(c *check.C) { const text = "My error message" ctx := field.Context(context.Background(), "key", "value") entry := testLogMessage(c, text, WithContext(ctx).Print) - c.Assert(entry["level"], Equals, infoLevelStr) + c.Assert(entry["level"], check.Equals, infoLevelStr) // Error should not be set in the log entry - c.Assert(entry["error"], Equals, nil) + c.Assert(entry["error"], check.Equals, nil) // A field with "key" should be set in the log entry - c.Assert(entry["key"], Equals, "value") + c.Assert(entry["key"], check.Equals, "value") } -func (s *LogSuite) TestLogWithContextFieldsAndError(c *C) { +func (s *LogSuite) TestLogWithContextFieldsAndError(c *check.C) { const text = "My error message" ctx := field.Context(context.Background(), "key", "value") err := errors.New("test error") entry := testLogMessage(c, text, WithError(err).WithContext(ctx).Print) - c.Assert(entry["level"], Equals, infoLevelStr) + c.Assert(entry["level"], check.Equals, infoLevelStr) // Error should be included in the log entry - c.Assert(entry["error"], Equals, err.Error()) + c.Assert(entry["error"], check.Equals, err.Error()) // A field with "key" should be set in the log entry - c.Assert(entry["key"], Equals, "value") + c.Assert(entry["key"], check.Equals, "value") } -func (s *LogSuite) TestLogPrintTo(c *C) { +func (s *LogSuite) TestLogPrintTo(c *check.C) { buf := &bytes.Buffer{} msg := "test log message" fields := map[string]interface{}{ @@ -107,12 +108,12 @@ func (s *LogSuite) TestLogPrintTo(c *C) { entry := map[string]interface{}{} err := json.Unmarshal(buf.Bytes(), &entry) - c.Assert(err, IsNil) - c.Assert(entry, NotNil) - c.Assert(entry["msg"], Equals, msg) + c.Assert(err, check.IsNil) + c.Assert(entry, check.NotNil) + c.Assert(entry["msg"], check.Equals, msg) } -func (s *LogSuite) TestLogPrintToParallel(c *C) { +func (s *LogSuite) TestLogPrintToParallel(c *check.C) { // this test ensures that the io.Writer passed to PrintTo() doesn't override // that of the global logger. 
// previously, the entry() function would return an entry bound to the global @@ -145,28 +146,28 @@ func (s *LogSuite) TestLogPrintToParallel(c *C) { for i := 0; i < len(buffers); i++ { actual := map[string]interface{}{} err := json.Unmarshal(buffers[i].Bytes(), &actual) - c.Assert(err, IsNil) - c.Assert(actual, NotNil) - c.Assert(actual["msg"], Equals, fmt.Sprintf("%s %d", msg, i)) + c.Assert(err, check.IsNil) + c.Assert(actual, check.NotNil) + c.Assert(actual["msg"], check.Equals, fmt.Sprintf("%s %d", msg, i)) } } -func testLogMessage(c *C, msg string, print func(string, ...field.M), fields ...field.M) map[string]interface{} { +func testLogMessage(c *check.C, msg string, print func(string, ...field.M), fields ...field.M) map[string]interface{} { log.SetFormatter(&logrus.JSONFormatter{TimestampFormat: time.RFC3339Nano}) var memLog bytes.Buffer log.SetOutput(&memLog) print(msg, fields...) var entry map[string]interface{} err := json.Unmarshal(memLog.Bytes(), &entry) - c.Assert(err, IsNil) - c.Assert(entry, NotNil) - c.Assert(entry["msg"], Equals, msg) + c.Assert(err, check.IsNil) + c.Assert(entry, check.NotNil) + c.Assert(entry["msg"], check.Equals, msg) return entry } -func (s *LogSuite) TestLogLevel(c *C) { +func (s *LogSuite) TestLogLevel(c *check.C) { err := os.Unsetenv(LevelEnvName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) initLogLevel() log.SetFormatter(&logrus.JSONFormatter{TimestampFormat: time.RFC3339Nano}) @@ -178,35 +179,35 @@ func (s *LogSuite) TestLogLevel(c *C) { Debug().WithContext(ctx).Print("Testing debug level") err = json.Unmarshal(output.Bytes(), &entry) - c.Assert(err, NotNil) - c.Assert(output.String(), HasLen, 0) + c.Assert(err, check.NotNil) + c.Assert(output.String(), check.HasLen, 0) // Check if debug level log is printed when log level is debug err = os.Setenv(LevelEnvName, "debug") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err := os.Unsetenv(LevelEnvName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) initLogLevel() }() initLogLevel() Debug().WithContext(ctx).Print("Testing debug level") cerr := json.Unmarshal(output.Bytes(), &entry) - c.Assert(cerr, IsNil) - c.Assert(entry, NotNil) - c.Assert(entry["msg"], Equals, "Testing debug level") + c.Assert(cerr, check.IsNil) + c.Assert(entry, check.NotNil) + c.Assert(entry["msg"], check.Equals, "Testing debug level") } -func (s *LogSuite) TestCloneGlobalLogger(c *C) { +func (s *LogSuite) TestCloneGlobalLogger(c *check.C) { hook := newTestLogHook() log.AddHook(hook) actual := cloneGlobalLogger() - c.Assert(actual.Formatter, Equals, log.Formatter) - c.Assert(actual.ReportCaller, Equals, log.ReportCaller) - c.Assert(actual.Level, Equals, log.Level) - c.Assert(actual.Out, Equals, log.Out) - c.Assert(actual.Hooks, DeepEquals, log.Hooks) + c.Assert(actual.Formatter, check.Equals, log.Formatter) + c.Assert(actual.ReportCaller, check.Equals, log.ReportCaller) + c.Assert(actual.Level, check.Equals, log.Level) + c.Assert(actual.Out, check.Equals, log.Out) + c.Assert(actual.Hooks, check.DeepEquals, log.Hooks) // changing `actual` should not affect global logger actual.SetFormatter(&logrus.TextFormatter{}) @@ -215,15 +216,61 @@ func (s *LogSuite) TestCloneGlobalLogger(c *C) { actual.SetOutput(&bytes.Buffer{}) actual.AddHook(&logHook{}) - c.Assert(actual.Formatter, Not(Equals), log.Formatter) - c.Assert(actual.ReportCaller, Not(Equals), log.ReportCaller) - c.Assert(actual.Level, Not(Equals), log.Level) - c.Assert(actual.Out, Not(Equals), log.Out) - c.Assert(actual.Hooks, Not(DeepEquals), 
log.Hooks) + c.Assert(actual.Formatter, check.Not(check.Equals), log.Formatter) + c.Assert(actual.ReportCaller, check.Not(check.Equals), log.ReportCaller) + c.Assert(actual.Level, check.Not(check.Equals), log.Level) + c.Assert(actual.Out, check.Not(check.Equals), log.Out) + c.Assert(actual.Hooks, check.Not(check.DeepEquals), log.Hooks) log.Println("Test message") - c.Assert(len(hook.capturedMessages), Equals, 1) - c.Assert(hook.capturedMessages[0].Message, Equals, "Test message") + c.Assert(len(hook.capturedMessages), check.Equals, 1) + c.Assert(hook.capturedMessages[0].Message, check.Equals, "Test message") +} + +func (s *LogSuite) TestSetFluentbitOutput(c *check.C) { + for _, tc := range []struct { + desc string + url *url.URL + err error + }{ + { + desc: "valid_url", + url: &url.URL{ + Scheme: "tcp", + Host: "something", + }, + }, + { + desc: "path_is_set", + url: &url.URL{ + Scheme: "tcp", + Host: "something", + Path: "something", + }, + err: ErrPathSet, + }, + { + desc: "non_tcp_endpoint", + url: &url.URL{ + Scheme: "http", + Host: "something", + Path: "something", + }, + err: ErrNonTCPEndpoint, + }, + { + desc: "empty_endpoint", + url: &url.URL{}, + err: ErrEndpointNotSet, + }, + { + desc: "nil_endpoint", + err: ErrEndpointNotSet, + }, + } { + err := SetFluentbitOutput(tc.url) + c.Assert(err, check.Equals, tc.err) + } } type logHook struct { diff --git a/pkg/metrics/metrics_test.go b/pkg/metrics/metrics_test.go index a9edb487bd..6bde0df8ac 100644 --- a/pkg/metrics/metrics_test.go +++ b/pkg/metrics/metrics_test.go @@ -19,16 +19,16 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) type MetricsSuite struct{} -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } -var _ = Suite(&MetricsSuite{}) +var _ = check.Suite(&MetricsSuite{}) -func (m *MetricsSuite) TestGetLabelCombinations(c *C) { +func (m *MetricsSuite) TestGetLabelCombinations(c *check.C) { boundedLabels := make([]BoundedLabel, 3) boundedLabels[0] = BoundedLabel{LabelName: "operation_type", LabelValues: []string{"backup", "restore"}} boundedLabels[1] = BoundedLabel{LabelName: "resolution", LabelValues: []string{"success", "failure"}} @@ -76,15 +76,15 @@ func (m *MetricsSuite) TestGetLabelCombinations(c *C) { }, } receivedCombinations, err := getLabelCombinations(boundedLabels) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) isEqual := reflect.DeepEqual(receivedCombinations, expectedPrometheusLabels) - c.Assert(isEqual, Equals, true) + c.Assert(isEqual, check.Equals, true) boundedLabels = make([]BoundedLabel, 0) receivedCombinations, err = getLabelCombinations(boundedLabels) - c.Assert(receivedCombinations, IsNil) - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "invalid BoundedLabel list") + c.Assert(receivedCombinations, check.IsNil) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), check.Equals, "invalid BoundedLabel list") boundedLabels = make([]BoundedLabel, 1) boundedLabels[0] = BoundedLabel{LabelName: "resolution", LabelValues: []string{"success", "failure"}} @@ -98,25 +98,25 @@ func (m *MetricsSuite) TestGetLabelCombinations(c *C) { } receivedCombinations, err = getLabelCombinations(boundedLabels) isEqual = reflect.DeepEqual(receivedCombinations, expectedPrometheusLabels) - c.Assert(err, IsNil) - c.Assert(isEqual, Equals, true) + c.Assert(err, check.IsNil) + c.Assert(isEqual, check.Equals, true) boundedLabels = make([]BoundedLabel, 1) boundedLabels[0] = 
BoundedLabel{LabelName: "resolution", LabelValues: nil} receivedCombinations, err = getLabelCombinations(boundedLabels) - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "invalid BoundedLabel list") - c.Assert(receivedCombinations, IsNil) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), check.Equals, "invalid BoundedLabel list") + c.Assert(receivedCombinations, check.IsNil) boundedLabels = make([]BoundedLabel, 1) boundedLabels[0] = BoundedLabel{LabelName: "resolution", LabelValues: []string{}} - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "invalid BoundedLabel list") - c.Assert(receivedCombinations, IsNil) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), check.Equals, "invalid BoundedLabel list") + c.Assert(receivedCombinations, check.IsNil) } -func (m *MetricsSuite) TestInitCounterVec(c *C) { +func (m *MetricsSuite) TestInitCounterVec(c *check.C) { boundedLabels := make([]BoundedLabel, 2) boundedLabels[0] = BoundedLabel{LabelName: "operation_type", LabelValues: []string{"backup", "restore"}} boundedLabels[1] = BoundedLabel{LabelName: "resolution", LabelValues: []string{"success", "failure"}} @@ -126,15 +126,15 @@ func (m *MetricsSuite) TestInitCounterVec(c *C) { } registry := prometheus.NewRegistry() metrics, err := registry.Gather() - c.Assert(metrics, IsNil) - c.Assert(err, IsNil) + c.Assert(metrics, check.IsNil) + c.Assert(err, check.IsNil) actionSetCounterVec := InitCounterVec(registry, actionSetCounterOpts, boundedLabels) metrics, err = registry.Gather() - c.Assert(metrics, NotNil) - c.Assert(err, IsNil) - c.Assert(len(metrics), Equals, 1) + c.Assert(metrics, check.NotNil) + c.Assert(err, check.IsNil) + c.Assert(len(metrics), check.Equals, 1) expectedOperationTypes := map[string]int{"backup": 0, "restore": 0} expectedResolutionTypes := map[string]int{"success": 0, "failure": 0} @@ -147,13 +147,13 @@ func (m *MetricsSuite) TestInitCounterVec(c *C) { } } } - c.Assert(expectedOperationTypes["backup"], Equals, 2) - c.Assert(expectedOperationTypes["restore"], Equals, 2) - c.Assert(expectedResolutionTypes["success"], Equals, 2) - c.Assert(expectedResolutionTypes["failure"], Equals, 2) - - c.Assert(testutil.ToFloat64(actionSetCounterVec.WithLabelValues("backup", "success")), Equals, float64(0)) - c.Assert(testutil.ToFloat64(actionSetCounterVec.WithLabelValues("backup", "failure")), Equals, float64(0)) - c.Assert(testutil.ToFloat64(actionSetCounterVec.WithLabelValues("restore", "success")), Equals, float64(0)) - c.Assert(testutil.ToFloat64(actionSetCounterVec.WithLabelValues("restore", "failure")), Equals, float64(0)) + c.Assert(expectedOperationTypes["backup"], check.Equals, 2) + c.Assert(expectedOperationTypes["restore"], check.Equals, 2) + c.Assert(expectedResolutionTypes["success"], check.Equals, 2) + c.Assert(expectedResolutionTypes["failure"], check.Equals, 2) + + c.Assert(testutil.ToFloat64(actionSetCounterVec.WithLabelValues("backup", "success")), check.Equals, float64(0)) + c.Assert(testutil.ToFloat64(actionSetCounterVec.WithLabelValues("backup", "failure")), check.Equals, float64(0)) + c.Assert(testutil.ToFloat64(actionSetCounterVec.WithLabelValues("restore", "success")), check.Equals, float64(0)) + c.Assert(testutil.ToFloat64(actionSetCounterVec.WithLabelValues("restore", "failure")), check.Equals, float64(0)) } diff --git a/pkg/objectstore/bucket_test.go b/pkg/objectstore/bucket_test.go index f6650d2e54..18b35bb58b 100644 --- a/pkg/objectstore/bucket_test.go +++ b/pkg/objectstore/bucket_test.go @@ -4,21 +4,21 @@ import ( "context" "fmt" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" ) type BucketSuite struct{} -var _ = Suite(&BucketSuite{}) +var _ = check.Suite(&BucketSuite{}) -func (s *BucketSuite) SetUpSuite(c *C) { +func (s *BucketSuite) SetUpSuite(c *check.C) { getEnvOrSkip(c, "AWS_ACCESS_KEY_ID") getEnvOrSkip(c, "AWS_SECRET_ACCESS_KEY") } const ahmRe = `[\w\W]*AuthorizationHeaderMalformed[\w\W]*` -func (s *BucketSuite) TestInvalidS3RegionEndpointMismatch(c *C) { +func (s *BucketSuite) TestInvalidS3RegionEndpointMismatch(c *check.C) { ctx := context.Background() const pt = ProviderTypeS3 const bn = `kanister-fake-bucket` @@ -35,23 +35,23 @@ func (s *BucketSuite) TestInvalidS3RegionEndpointMismatch(c *C) { }, secret, ) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Get Bucket will use the region's correct endpoint. _, err = p.GetBucket(ctx, bn) - c.Assert(err, ErrorMatches, ahmRe) - c.Assert(err, NotNil) + c.Assert(err, check.ErrorMatches, ahmRe) + c.Assert(err, check.NotNil) _, err = p.CreateBucket(ctx, bn) - c.Assert(err, ErrorMatches, ahmRe) - c.Assert(err, NotNil) + c.Assert(err, check.ErrorMatches, ahmRe) + c.Assert(err, check.NotNil) err = p.DeleteBucket(ctx, bn) - c.Assert(err, ErrorMatches, ahmRe) - c.Assert(err, NotNil) + c.Assert(err, check.ErrorMatches, ahmRe) + c.Assert(err, check.NotNil) } -func (s *BucketSuite) TestValidS3ClientBucketRegionMismatch(c *C) { +func (s *BucketSuite) TestValidS3ClientBucketRegionMismatch(c *check.C) { ctx := context.Background() const pt = ProviderTypeS3 const bn = `kanister-test-bucket-us-west-1` @@ -79,67 +79,67 @@ func (s *BucketSuite) TestValidS3ClientBucketRegionMismatch(c *C) { // p1's region matches the bucket's region. p1, err := NewProvider(ctx, pc1, secret) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // p2's region does not match the bucket's region, but does not specify an // endpoint. p2, err := NewProvider(ctx, pc2, secret) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // p3's region does not match the bucket's region and specifies an endpoint. p3, err := NewProvider(ctx, pc3, secret) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Delete and recreate the bucket to ensure it's region is r1. _ = p1.DeleteBucket(ctx, bn) _, err = p1.CreateBucket(ctx, bn) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err = p1.DeleteBucket(ctx, bn) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() // Check the bucket's region is r1 err = checkProviderWithBucket(c, ctx, p1, bn, r1) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // We can read a bucket even though it our provider's does not match, as // long as we don't specify an endpoint. err = checkProviderWithBucket(c, ctx, p2, bn, r1) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Specifying an the wrong endpoint causes bucket ops to fail. err = checkProviderWithBucket(c, ctx, p3, bn, r1) - c.Assert(err, NotNil) - c.Assert(err, ErrorMatches, ahmRe) + c.Assert(err, check.NotNil) + c.Assert(err, check.ErrorMatches, ahmRe) } -func checkProviderWithBucket(c *C, ctx context.Context, p Provider, bucketName, region string) error { +func checkProviderWithBucket(c *check.C, ctx context.Context, p Provider, bucketName, region string) error { bs, err := p.ListBuckets(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, ok := bs[bucketName] - c.Assert(ok, Equals, true) + c.Assert(ok, check.Equals, true) // We should fail here if the endpoint is set and does not match bucket region. 
b, err := p.GetBucket(ctx, bucketName) if err != nil { return err } - c.Assert(err, IsNil) - c.Assert(b, NotNil) + c.Assert(err, check.IsNil) + c.Assert(b, check.NotNil) s3p, ok := p.(*s3Provider) - c.Assert(ok, Equals, true) - c.Assert(s3p, NotNil) + c.Assert(ok, check.Equals, true) + c.Assert(s3p, check.NotNil) r, err := s3p.GetRegionForBucket(ctx, bucketName) - c.Assert(err, IsNil) - c.Assert(r, Equals, region) + c.Assert(err, check.IsNil) + c.Assert(r, check.Equals, region) _, err = b.ListObjects(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) return nil } -func (s *BucketSuite) TestGetRegionForBucket(c *C) { +func (s *BucketSuite) TestGetRegionForBucket(c *check.C) { ctx := context.Background() const pt = ProviderTypeS3 secret := getSecret(ctx, c, pt) @@ -154,14 +154,14 @@ func (s *BucketSuite) TestGetRegionForBucket(c *C) { //Endpoint: "http://127.0.0.1:9000", } p, err := NewProvider(ctx, pc, secret) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = p.getOrCreateBucket(ctx, existingBucket) c.Log(fmt.Sprintf("%+v", err)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) bucket, err := p.GetBucket(ctx, nonExistentBucket) c.Log(bucket, err) - c.Assert(err, NotNil) - c.Assert(IsBucketNotFoundError(err), Equals, true) + c.Assert(err, check.NotNil) + c.Assert(IsBucketNotFoundError(err), check.Equals, true) for _, tc := range []struct { bucketName string @@ -252,17 +252,17 @@ func (s *BucketSuite) TestGetRegionForBucket(c *C) { }, secret, ) - c.Assert(err, IsNil) - cmt := Commentf("Case: %#v", tc) + c.Assert(err, check.IsNil) + cmt := check.Commentf("Case: %#v", tc) sp, ok := p.(*s3Provider) - c.Assert(ok, Equals, true) + c.Assert(ok, check.Equals, true) rfb, err := sp.GetRegionForBucket(ctx, tc.bucketName) if tc.valid { - c.Assert(err, IsNil, cmt) - c.Assert(rfb, Equals, tc.bucketRegion, cmt) + c.Assert(err, check.IsNil, cmt) + c.Assert(rfb, check.Equals, tc.bucketRegion, cmt) } else { - c.Assert(err, NotNil, cmt) + c.Assert(err, check.NotNil, cmt) } } } diff --git a/pkg/objectstore/objectstore_test.go b/pkg/objectstore/objectstore_test.go index 7050a1ed3c..1457d509f1 100644 --- a/pkg/objectstore/objectstore_test.go +++ b/pkg/objectstore/objectstore_test.go @@ -29,12 +29,12 @@ import ( "github.com/graymeta/stow" "golang.org/x/oauth2/google" "google.golang.org/api/compute/v1" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/aws" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type ObjectStoreProviderSuite struct { osType ProviderType @@ -52,11 +52,11 @@ const ( testRegionS3 = "us-west-2" ) -var _ = Suite(&ObjectStoreProviderSuite{osType: ProviderTypeS3, region: testRegionS3}) -var _ = Suite(&ObjectStoreProviderSuite{osType: ProviderTypeGCS, region: ""}) -var _ = Suite(&ObjectStoreProviderSuite{osType: ProviderTypeAzure, region: ""}) +var _ = check.Suite(&ObjectStoreProviderSuite{osType: ProviderTypeS3, region: testRegionS3}) +var _ = check.Suite(&ObjectStoreProviderSuite{osType: ProviderTypeGCS, region: ""}) +var _ = check.Suite(&ObjectStoreProviderSuite{osType: ProviderTypeAzure, region: ""}) -func (s *ObjectStoreProviderSuite) SetUpSuite(c *C) { +func (s *ObjectStoreProviderSuite) SetUpSuite(c *check.C) { switch s.osType { case ProviderTypeS3: getEnvOrSkip(c, "AWS_ACCESS_KEY_ID") @@ -78,25 +78,25 @@ func (s *ObjectStoreProviderSuite) SetUpSuite(c *C) { ctx := context.Background() s.root, err = GetOrCreateBucket(ctx, s.provider, testBucketName) - c.Check(err, IsNil) - c.Assert(s.root, NotNil) + c.Check(err, check.IsNil) + c.Assert(s.root, check.NotNil) // While two concurrent instances could potentially collide, the probability // is extremely low. This approach makes the directory prefix informative. s.suiteDirPrefix = time.Now().UTC().Format(time.RFC3339Nano) } -func (s *ObjectStoreProviderSuite) SetUpTest(c *C) { +func (s *ObjectStoreProviderSuite) SetUpTest(c *check.C) { s.initProvider(c, s.region) s.testDir = s.suiteDirPrefix + "-" + c.TestName() } -func (s *ObjectStoreProviderSuite) TearDownTest(c *C) { +func (s *ObjectStoreProviderSuite) TearDownTest(c *check.C) { if s.testDir != "" { cleanupBucketDirectory(c, s.root, s.testDir) } } -func (s *ObjectStoreProviderSuite) initProvider(c *C, region string) { +func (s *ObjectStoreProviderSuite) initProvider(c *check.C, region string) { ctx := context.Background() var err error pc := ProviderConfig{ @@ -106,12 +106,12 @@ func (s *ObjectStoreProviderSuite) initProvider(c *C, region string) { } secret := getSecret(ctx, c, s.osType) s.provider, err = NewProvider(ctx, pc, secret) - c.Check(err, IsNil) - c.Assert(s.provider, NotNil) + c.Check(err, check.IsNil) + c.Assert(s.provider, check.NotNil) } // Verifies bucket operations, create/delete/list -func (s *ObjectStoreProviderSuite) TestBuckets(c *C) { +func (s *ObjectStoreProviderSuite) TestBuckets(c *check.C) { c.Skip("intermittently fails due to rate limits on bucket creation") ctx := context.Background() bucketName := s.createBucketName(c) @@ -119,51 +119,51 @@ func (s *ObjectStoreProviderSuite) TestBuckets(c *C) { origBuckets, _ := s.provider.ListBuckets(ctx) _, err := s.provider.CreateBucket(ctx, bucketName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Duplicate bucket _, err = s.provider.CreateBucket(ctx, bucketName) - c.Assert(err, Not(IsNil)) + c.Assert(err, check.Not(check.IsNil)) // Should be one more than buckets. 
Can be racy with other activity // and so checking for inequality buckets, _ := s.provider.ListBuckets(ctx) - c.Check(len(buckets), Not(Equals), len(origBuckets)) + c.Check(len(buckets), check.Not(check.Equals), len(origBuckets)) bucket, err := s.provider.GetBucket(ctx, bucketName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) c.Logf("Created bucket: %s", bucket) - c.Check(len(buckets), Not(Equals), len(origBuckets)) + c.Check(len(buckets), check.Not(check.Equals), len(origBuckets)) // Check if deletion succeeds err = s.provider.DeleteBucket(ctx, bucketName) - c.Check(err, IsNil) + c.Check(err, check.IsNil) } -func (s *ObjectStoreProviderSuite) TestCreateExistingBucket(c *C) { +func (s *ObjectStoreProviderSuite) TestCreateExistingBucket(c *check.C) { ctx := context.Background() // The bucket should already exist, the suite setup creates it d, err := s.provider.GetBucket(ctx, testBucketName) - c.Check(err, IsNil) - c.Check(d, NotNil) + c.Check(err, check.IsNil) + c.Check(d, check.NotNil) d, err = s.provider.CreateBucket(ctx, testBucketName) - c.Check(err, NotNil) - c.Check(d, IsNil) + c.Check(err, check.NotNil) + c.Check(d, check.IsNil) } -func (s *ObjectStoreProviderSuite) TestGetNonExistingBucket(c *C) { +func (s *ObjectStoreProviderSuite) TestGetNonExistingBucket(c *check.C) { if s.osType != ProviderTypeS3 { c.Skip("Test only applicable to AWS S3") } ctx := context.Background() bucketName := s.createBucketName(c) bucket, err := s.provider.GetBucket(ctx, bucketName) - c.Check(err, NotNil) - c.Assert(IsBucketNotFoundError(err), Equals, true) - c.Check(bucket, IsNil) + c.Check(err, check.NotNil) + c.Assert(IsBucketNotFoundError(err), check.Equals, true) + c.Check(bucket, check.IsNil) } -func (s *ObjectStoreProviderSuite) TestCreateExistingBucketS3Regions(c *C) { +func (s *ObjectStoreProviderSuite) TestCreateExistingBucketS3Regions(c *check.C) { if s.osType != ProviderTypeS3 { c.Skip("Test only applicable to AWS S3") } @@ -171,22 +171,22 @@ func (s *ObjectStoreProviderSuite) TestCreateExistingBucketS3Regions(c *C) { for _, region := range []string{"us-east-2", testRegionS3, "us-east-1", "us-west-1"} { s.initProvider(c, region) d, err := s.provider.CreateBucket(ctx, testBucketName) - c.Check(err, NotNil) - c.Check(d, IsNil) + c.Check(err, check.NotNil) + c.Check(d, check.IsNil) } } // TestDirectories verifies directory operations: create, list, delete -func (s *ObjectStoreProviderSuite) TestDirectories(c *C) { +func (s *ObjectStoreProviderSuite) TestDirectories(c *check.C) { ctx := context.Background() rootDirectory, err := s.root.CreateDirectory(ctx, s.testDir) - c.Assert(err, IsNil) - c.Assert(rootDirectory, NotNil) + c.Assert(err, check.IsNil) + c.Assert(rootDirectory, check.NotNil) directories, err := rootDirectory.ListDirectories(ctx) - c.Check(err, IsNil) + c.Check(err, check.IsNil) // Expecting nothing - c.Check(directories, HasLen, 0) + c.Check(directories, check.HasLen, 0) const ( dir1 = "directory1" @@ -194,98 +194,98 @@ func (s *ObjectStoreProviderSuite) TestDirectories(c *C) { ) _, err = rootDirectory.CreateDirectory(ctx, dir1) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Expecting only /dir1 directories, err = rootDirectory.ListDirectories(ctx) - c.Check(err, IsNil) - c.Check(directories, HasLen, 1) + c.Check(err, check.IsNil) + c.Check(directories, check.HasLen, 1) _, ok := directories[dir1] - c.Check(ok, Equals, true) + c.Check(ok, check.Equals, true) // Expecting only /dir1 directory, err := rootDirectory.GetDirectory(ctx, dir1) - c.Assert(err, IsNil) + 
c.Assert(err, check.IsNil) // Expecting /dir1/dir2 directory2, err := directory.CreateDirectory(ctx, dir2) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) directories, err = directory2.ListDirectories(ctx) - c.Check(err, IsNil) - c.Check(directories, HasLen, 0) + c.Check(err, check.IsNil) + c.Check(directories, check.HasLen, 0) directories, err = directory.ListDirectories(ctx) - c.Check(err, IsNil) - c.Check(directories, HasLen, 1) + c.Check(err, check.IsNil) + c.Check(directories, check.HasLen, 1) directories, err = rootDirectory.ListDirectories(ctx) - c.Check(err, IsNil) - c.Check(directories, HasLen, 1) + c.Check(err, check.IsNil) + c.Check(directories, check.HasLen, 1) // Get dir1/dir2 from root _, err = rootDirectory.GetDirectory(ctx, path.Join(dir1, dir2)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Get dir1/dir2 from any directory d2Name := path.Join(s.testDir, dir1, dir2) directory2, err = directory.GetDirectory(ctx, path.Join("/", d2Name)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Test delete directory // Create objects and directories under dir1/dir2 and under dir1 _, err = directory2.CreateDirectory(ctx, "d1d2d0") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = directory2.CreateDirectory(ctx, "d1d2d1") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = directory2.PutBytes(ctx, "d1d2o0", nil, nil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = directory.CreateDirectory(ctx, "d1d0") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = directory.CreateDirectory(ctx, "d1d1") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = directory.PutBytes(ctx, "d1o0", nil, nil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // objects and directories in directory1 should be there ds, err := directory.ListDirectories(ctx) - c.Assert(err, IsNil) - c.Assert(ds, HasLen, 3) + c.Assert(err, check.IsNil) + c.Assert(ds, check.HasLen, 3) err = directory2.DeleteDirectory(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cont := getStowContainer(c, directory2) checkNoItemsWithPrefix(c, cont, d2Name) directory2, err = directory.GetDirectory(ctx, dir2) // directory2 should no longer exist - c.Assert(err, NotNil) - c.Assert(directory2, IsNil) + c.Assert(err, check.NotNil) + c.Assert(directory2, check.IsNil) // other objects in directory1 should be there ds, err = directory.ListDirectories(ctx) - c.Assert(err, IsNil) - c.Assert(ds, HasLen, 2) + c.Assert(err, check.IsNil) + c.Assert(ds, check.HasLen, 2) obs, err := directory.ListObjects(ctx) - c.Assert(err, IsNil) - c.Assert(obs, HasLen, 1) - c.Assert(obs[0], Equals, "d1o0") + c.Assert(err, check.IsNil) + c.Assert(obs, check.HasLen, 1) + c.Assert(obs[0], check.Equals, "d1o0") directory, err = rootDirectory.GetDirectory(ctx, dir1) - c.Check(err, IsNil) + c.Check(err, check.IsNil) // Delete everything by deleting the parent directory err = directory.DeleteDirectory(ctx) - c.Check(err, IsNil) + c.Check(err, check.IsNil) checkNoItemsWithPrefix(c, cont, dir1) } -func (s *ObjectStoreProviderSuite) TestDeleteAllWithPrefix(c *C) { +func (s *ObjectStoreProviderSuite) TestDeleteAllWithPrefix(c *check.C) { ctx := context.Background() rootDirectory, err := s.root.CreateDirectory(ctx, s.testDir) - c.Assert(err, IsNil) - c.Assert(rootDirectory, NotNil) + c.Assert(err, check.IsNil) + c.Assert(rootDirectory, check.NotNil) const ( dir1 = "directory1" dir2 = "directory2" @@ -293,33 +293,33 @@ func (s *ObjectStoreProviderSuite) TestDeleteAllWithPrefix(c *C) { ) directory, err := 
rootDirectory.CreateDirectory(ctx, dir1) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Expecting /dir1/dir2 _, err = directory.CreateDirectory(ctx, dir2) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Expecting root dir to have /dir1/dir2 and /dir3 _, err = rootDirectory.CreateDirectory(ctx, dir3) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Delete everything with prefix "dir1" err = rootDirectory.DeleteAllWithPrefix(ctx, dir1) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Expecting root dir to have /dir3 directories, err := rootDirectory.ListDirectories(ctx) - c.Check(err, IsNil) - c.Check(directories, HasLen, 1) + c.Check(err, check.IsNil) + c.Check(directories, check.HasLen, 1) _, ok := directories[dir3] - c.Check(ok, Equals, true) + c.Check(ok, check.Equals, true) } // TestObjects verifies object operations: GetBytes and PutBytes -func (s *ObjectStoreProviderSuite) TestObjects(c *C) { +func (s *ObjectStoreProviderSuite) TestObjects(c *check.C) { ctx := context.Background() rootDirectory, err := s.root.CreateDirectory(ctx, s.testDir) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) const ( obj1 = "object1" @@ -335,35 +335,35 @@ func (s *ObjectStoreProviderSuite) TestObjects(c *C) { } err = rootDirectory.PutBytes(ctx, obj1, []byte(data1), nil) - c.Check(err, IsNil) + c.Check(err, check.IsNil) objs, err := rootDirectory.ListObjects(ctx) - c.Check(err, IsNil) - c.Assert(objs, HasLen, 1) - c.Check(objs[0], Equals, obj1) + c.Check(err, check.IsNil) + c.Assert(objs, check.HasLen, 1) + c.Check(objs[0], check.Equals, obj1) data, _, err := rootDirectory.GetBytes(ctx, obj1) - c.Check(err, IsNil) - c.Check(data, DeepEquals, []byte(data1)) + c.Check(err, check.IsNil) + c.Check(data, check.DeepEquals, []byte(data1)) _ = rootDirectory.PutBytes(ctx, obj2, []byte(data2), tags) data, ntags, err := rootDirectory.GetBytes(ctx, obj2) - c.Check(err, IsNil) - c.Check(data, DeepEquals, []byte(data2)) - c.Check(ntags, DeepEquals, tags) + c.Check(err, check.IsNil) + c.Check(data, check.DeepEquals, []byte(data2)) + c.Check(ntags, check.DeepEquals, tags) err = rootDirectory.Delete(ctx, obj1) - c.Check(err, IsNil) + c.Check(err, check.IsNil) err = rootDirectory.Delete(ctx, obj2) - c.Check(err, IsNil) + c.Check(err, check.IsNil) } // TestObjectsStreaming verifies object operations: Get and Put -func (s *ObjectStoreProviderSuite) TestObjectsStreaming(c *C) { +func (s *ObjectStoreProviderSuite) TestObjectsStreaming(c *check.C) { ctx := context.Background() rootDirectory, err := s.root.CreateDirectory(ctx, s.testDir) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) const ( obj1 = "object1" @@ -382,40 +382,40 @@ func (s *ObjectStoreProviderSuite) TestObjectsStreaming(c *C) { data2B := []byte(data2) err = rootDirectory.Put(ctx, obj1, bytes.NewReader(data1B), int64(len(data1B)), nil) - c.Check(err, IsNil) + c.Check(err, check.IsNil) objs, err := rootDirectory.ListObjects(ctx) - c.Check(err, IsNil) - c.Assert(objs, HasLen, 1) - c.Check(objs[0], Equals, obj1) + c.Check(err, check.IsNil) + c.Assert(objs, check.HasLen, 1) + c.Check(objs[0], check.Equals, obj1) r, _, err := rootDirectory.Get(ctx, obj1) - c.Check(err, IsNil) + c.Check(err, check.IsNil) data, err := io.ReadAll(r) - c.Check(err, IsNil) + c.Check(err, check.IsNil) err = r.Close() - c.Assert(err, IsNil) - c.Check(data, DeepEquals, data1B) + c.Assert(err, check.IsNil) + c.Check(data, check.DeepEquals, data1B) err = rootDirectory.Put(ctx, obj2, bytes.NewReader(data2B), int64(len(data2B)), tags) - c.Check(err, IsNil) + 
c.Check(err, check.IsNil) r, ntags, err := rootDirectory.Get(ctx, obj2) - c.Check(err, IsNil) + c.Check(err, check.IsNil) data, err = io.ReadAll(r) - c.Check(err, IsNil) + c.Check(err, check.IsNil) err = r.Close() - c.Assert(err, IsNil) - c.Check(data, DeepEquals, data2B) - c.Check(ntags, DeepEquals, tags) + c.Assert(err, check.IsNil) + c.Check(data, check.DeepEquals, data2B) + c.Check(ntags, check.DeepEquals, tags) err = rootDirectory.Delete(ctx, obj1) - c.Check(err, IsNil) + c.Check(err, check.IsNil) err = rootDirectory.Delete(ctx, obj2) - c.Check(err, IsNil) + c.Check(err, check.IsNil) } -func (s *ObjectStoreProviderSuite) createBucketName(c *C) string { +func (s *ObjectStoreProviderSuite) createBucketName(c *check.C) string { // Generate a bucket name bucketName := fmt.Sprintf("kio-io-tests-%v-%d", strings.ToLower(c.TestName()), s.rand.Uint32()) if len(bucketName) > 63 { @@ -428,64 +428,64 @@ func (s *ObjectStoreProviderSuite) createBucketName(c *C) string { return bucketName } -func checkNoItemsWithPrefix(c *C, cont stow.Container, prefix string) { +func checkNoItemsWithPrefix(c *check.C, cont stow.Container, prefix string) { items, _, err := cont.Items(prefix, stow.CursorStart, 2) - c.Assert(err, IsNil) - c.Assert(items, HasLen, 0) + c.Assert(err, check.IsNil) + c.Assert(items, check.HasLen, 0) } -func (s *ObjectStoreProviderSuite) TestBucketGetRegions(c *C) { +func (s *ObjectStoreProviderSuite) TestBucketGetRegions(c *check.C) { role := os.Getenv(aws.ConfigRole) if s.osType != ProviderTypeS3 || role != "" { c.Skip("Test only applicable to S3") } ctx := context.Background() origBucket, err := s.provider.GetBucket(ctx, testBucketName) - c.Assert(err, IsNil) - c.Assert(origBucket, NotNil) + c.Assert(err, check.IsNil) + c.Assert(origBucket, check.NotNil) // Creating an object in existing bucket to check it later when we call GetOrCreateBucket, // to see if existing bucket was returned orgBucketObjectName := s.suiteDirPrefix + "GetRegions" err = origBucket.PutBytes(ctx, orgBucketObjectName, []byte("content-getRegions"), nil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err = origBucket.Delete(ctx, orgBucketObjectName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() b, err := GetOrCreateBucket(ctx, s.provider, testBucketName) - c.Assert(err, IsNil) - c.Assert(b, NotNil) + c.Assert(err, check.IsNil) + c.Assert(b, check.NotNil) // Checking if same bucket was returned by checking if object // that was created previously exists in newly retrieved bucket _, _, err = b.Get(ctx, orgBucketObjectName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) l, err := b.ListObjects(ctx) - c.Assert(err, IsNil) - c.Assert(l, NotNil) + c.Assert(err, check.IsNil) + c.Assert(l, check.NotNil) objectName := s.suiteDirPrefix + "foo" err = b.PutBytes(ctx, objectName, []byte("content"), nil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = b.Delete(ctx, objectName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *ObjectStoreProviderSuite) TestBucketWrongRegion(c *C) { +func (s *ObjectStoreProviderSuite) TestBucketWrongRegion(c *check.C) { ctx := context.Background() for _, region := range []string{"us-east-1", "us-east-2", "us-west-1"} { s.initProvider(c, region) b, err := s.provider.GetBucket(ctx, testBucketName) - c.Check(err, IsNil) - c.Check(b, NotNil) + c.Check(err, check.IsNil) + c.Check(b, check.NotNil) _, err = b.ListObjects(ctx) - c.Check(err, IsNil) + c.Check(err, check.IsNil) } } -func getSecret(ctx context.Context, c *C, osType ProviderType) 
*Secret { +func getSecret(ctx context.Context, c *check.C, osType ProviderType) *Secret { secret := &Secret{} switch osType { case ProviderTypeS3: @@ -496,36 +496,36 @@ func getSecret(ctx context.Context, c *C, osType ProviderType) *Secret { aws.ConfigRole: os.Getenv("AWS_ROLE"), } creds, err := aws.GetCredentials(ctx, config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) val, err := creds.Get() - c.Check(err, IsNil) + c.Check(err, check.IsNil) secret.Aws = &SecretAws{ AccessKeyID: val.AccessKeyID, SecretAccessKey: val.SecretAccessKey, SessionToken: val.SessionToken, } - c.Check(secret.Aws.AccessKeyID, Not(Equals), "") - c.Check(secret.Aws.SecretAccessKey, Not(Equals), "") + c.Check(secret.Aws.AccessKeyID, check.Not(check.Equals), "") + c.Check(secret.Aws.SecretAccessKey, check.Not(check.Equals), "") case ProviderTypeGCS: creds, err := google.FindDefaultCredentials(context.Background(), compute.ComputeScope) - c.Check(err, IsNil) + c.Check(err, check.IsNil) secret.Type = SecretTypeGcpServiceAccountKey secret.Gcp = &SecretGcp{ ServiceKey: string(creds.JSON), ProjectID: creds.ProjectID, } - c.Check(secret.Gcp.ServiceKey, Not(Equals), "") - c.Check(secret.Gcp.ProjectID, Not(Equals), "") + c.Check(secret.Gcp.ServiceKey, check.Not(check.Equals), "") + c.Check(secret.Gcp.ProjectID, check.Not(check.Equals), "") case ProviderTypeAzure: secret.Type = SecretTypeAzStorageAccount secret.Azure = &SecretAzure{ StorageAccount: os.Getenv("AZURE_STORAGE_ACCOUNT_NAME"), StorageKey: os.Getenv("AZURE_STORAGE_ACCOUNT_KEY"), } - c.Check(secret.Azure.StorageAccount, Not(Equals), "") - c.Check(secret.Azure.StorageKey, Not(Equals), "") + c.Check(secret.Azure.StorageAccount, check.Not(check.Equals), "") + c.Check(secret.Azure.StorageKey, check.Not(check.Equals), "") default: c.Logf("Unsupported provider '%s'", osType) c.Fail() @@ -534,32 +534,32 @@ func getSecret(ctx context.Context, c *C, osType ProviderType) *Secret { } // Can be added to a common place in Kanister -func getEnvOrSkip(c *C, varName string) { +func getEnvOrSkip(c *check.C, varName string) { v := os.Getenv(varName) if v == "" { c.Skip("Required environment variable '" + varName + "' not set") } } -func cleanupBucketDirectory(c *C, bucket Bucket, directory string) { - c.Assert(bucket, NotNil) +func cleanupBucketDirectory(c *check.C, bucket Bucket, directory string) { + c.Assert(bucket, check.NotNil) ctx := context.Background() d, err := bucket.GetDirectory(ctx, directory) if err != nil { c.Log("Cannot cleanup test directory: ", directory) return } - c.Assert(d, NotNil) + c.Assert(d, check.NotNil) err = d.DeleteDirectory(ctx) - c.Check(err, IsNil) + c.Check(err, check.IsNil) } // getStowContainer checks that the given directory matches the implementation // type -func getStowContainer(c *C, d Directory) stow.Container { - c.Assert(d, FitsTypeOf, &directory{}) +func getStowContainer(c *check.C, d Directory) stow.Container { + c.Assert(d, check.FitsTypeOf, &directory{}) sd, ok := d.(*directory) - c.Assert(ok, Equals, true) - c.Assert(sd, NotNil) + c.Assert(ok, check.Equals, true) + c.Assert(sd, check.NotNil) return sd.bucket.container } diff --git a/pkg/output/output_test.go b/pkg/output/output_test.go index a63e681030..85b7518621 100644 --- a/pkg/output/output_test.go +++ b/pkg/output/output_test.go @@ -17,29 +17,29 @@ package output import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type OutputSuite struct{} -var _ = Suite(&OutputSuite{}) +var _ = check.Suite(&OutputSuite{}) -func (s *OutputSuite) TestValidateKey(c *C) { +func (s *OutputSuite) TestValidateKey(c *check.C) { for _, tc := range []struct { key string - checker Checker + checker check.Checker }{ - {"validKey", IsNil}, - {"validKey2", IsNil}, - {"valid_key", IsNil}, - {"invalid-key", NotNil}, - {"invalid.key", NotNil}, - {"`invalidKey", NotNil}, + {"validKey", check.IsNil}, + {"validKey2", check.IsNil}, + {"valid_key", check.IsNil}, + {"invalid-key", check.NotNil}, + {"invalid.key", check.NotNil}, + {"`invalidKey", check.NotNil}, } { err := ValidateKey(tc.key) - c.Check(err, tc.checker, Commentf("Key (%s) failed!", tc.key)) + c.Check(err, tc.checker, check.Commentf("Key (%s) failed!", tc.key)) } } diff --git a/pkg/output/stream_test.go b/pkg/output/stream_test.go index 853d8b77f1..d46c2f5d4b 100644 --- a/pkg/output/stream_test.go +++ b/pkg/output/stream_test.go @@ -21,7 +21,7 @@ import ( "math/rand" "time" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" apirand "k8s.io/apimachinery/pkg/util/rand" "github.com/kanisterio/kanister/pkg/output" @@ -39,7 +39,7 @@ const ( type OutputTestSuite struct{} -var _ = Suite(&OutputTestSuite{}) +var _ = check.Suite(&OutputTestSuite{}) type testCase struct { prefixLength int @@ -165,102 +165,102 @@ func writePrefix(pw io.Writer, r *rand.Rand, tc testCase) { // TestLongStreamsWithoutPhaseOutput Will produce 10 long lines // each line will contain from 50Kb to 60Kb of random text // there will be no phase output in lines -func (s *OutputTestSuite) TestLongStreamsWithoutPhaseOutput(c *C) { +func (s *OutputTestSuite) TestLongStreamsWithoutPhaseOutput(c *check.C) { done := make(chan struct{}) defer func() { close(done) }() cases := generateTestCases(10, 50000, 0, 0, EndlineRequired) r := getTestReaderCloser(done, cases) m, e := output.LogAndParse(context.TODO(), r) - c.Check(e, IsNil) - c.Check(len(m), Equals, 0) + c.Check(e, check.IsNil) + c.Check(len(m), check.Equals, 0) } // TestShortStreamsWithPhaseOutput Will produce one short line // which will contain ONLY phase output and nothing else -func (s *OutputTestSuite) TestShortStreamsWithPhaseOutput(c *C) { +func (s *OutputTestSuite) TestShortStreamsWithPhaseOutput(c *check.C) { done := make(chan struct{}) defer func() { close(done) }() cases := generateTestCases(1, 0, 10, 50, EndlineRequired) r := getTestReaderCloser(done, cases) m, e := output.LogAndParse(context.TODO(), r) - c.Check(e, IsNil) - c.Check(len(m), Equals, 1) - c.Check(m[cases[0].key], Equals, string(cases[0].value)) + c.Check(e, check.IsNil) + c.Check(len(m), check.Equals, 1) + c.Check(m[cases[0].key], check.Equals, string(cases[0].value)) } // TestLongStreamsWithPhaseOutput Will produce 10 long lines // each line will contain from 10Kb to 12Kb of random text and // phase output preceded with newline -func (s *OutputTestSuite) TestLongStreamsWithPhaseOutput(c *C) { +func (s *OutputTestSuite) TestLongStreamsWithPhaseOutput(c *check.C) { done := make(chan struct{}) defer func() { close(done) }() cases := generateTestCases(10, 10000, 10, 50, EndlineRequired) r := getTestReaderCloser(done, cases) m, e := output.LogAndParse(context.TODO(), r) - c.Check(e, IsNil) - c.Check(len(m), Equals, 10) - c.Check(m[cases[0].key], Equals, string(cases[0].value)) + c.Check(e, check.IsNil) + c.Check(len(m), check.Equals, 10) + c.Check(m[cases[0].key], check.Equals, string(cases[0].value)) } // 
TestHugeStreamsWithHugePhaseOutput Will produce five huge lines // each line will contain ±100Kb of random text WITH newline before Phase Output mark // Phase output value will be very short -func (s *OutputTestSuite) TestHugeStreamsWithPhaseOutput(c *C) { +func (s *OutputTestSuite) TestHugeStreamsWithPhaseOutput(c *check.C) { done := make(chan struct{}) defer func() { close(done) }() cases := generateTestCases(5, 100000, 10, 50, EndlineRequired) r := getTestReaderCloser(done, cases) m, e := output.LogAndParse(context.TODO(), r) - c.Check(e, IsNil) - c.Check(len(m), Equals, 5) - c.Check(m[cases[0].key], Equals, string(cases[0].value)) + c.Check(e, check.IsNil) + c.Check(len(m), check.Equals, 5) + c.Check(m[cases[0].key], check.Equals, string(cases[0].value)) } // TestHugeStreamsWithHugePhaseOutput Will produce five huge lines // each line will contain ±500Kb of random text WITH newline before Phase Output mark // Phase output value will be ±10Kb of random text -func (s *OutputTestSuite) TestHugeStreamsWithLongPhaseOutput(c *C) { +func (s *OutputTestSuite) TestHugeStreamsWithLongPhaseOutput(c *check.C) { done := make(chan struct{}) defer func() { close(done) }() cases := generateTestCases(5, 500000, 10, 10000, EndlineRequired) r := getTestReaderCloser(done, cases) m, e := output.LogAndParse(context.TODO(), r) - c.Check(e, IsNil) - c.Check(len(m), Equals, 5) - c.Check(m[cases[0].key], Equals, string(cases[0].value)) + c.Check(e, check.IsNil) + c.Check(len(m), check.Equals, 5) + c.Check(m[cases[0].key], check.Equals, string(cases[0].value)) } // TestHugeStreamsWithHugePhaseOutput Will produce one huge line // which will contain ±500Kb of random text WITH newline before Phase Output mark // Phase output value will also be ±500Kb -func (s *OutputTestSuite) TestHugeStreamsWithHugePhaseOutput(c *C) { +func (s *OutputTestSuite) TestHugeStreamsWithHugePhaseOutput(c *check.C) { done := make(chan struct{}) defer func() { close(done) }() cases := generateTestCases(1, 500000, 10, 500000, EndlineRequired) r := getTestReaderCloser(done, cases) m, e := output.LogAndParse(context.TODO(), r) - c.Check(e, IsNil) - c.Check(len(m), Equals, 1) - c.Check(m[cases[0].key], Equals, string(cases[0].value)) + c.Check(e, check.IsNil) + c.Check(len(m), check.Equals, 1) + c.Check(m[cases[0].key], check.Equals, string(cases[0].value)) } // TestHugeStreamsWithHugePhaseOutputWithoutNewlineDelimiter Will produce one huge line // which will contain ±500Kb of random text WITHOUT newline before Phase Output mark // Phase output value will also be ±500Kb -func (s *OutputTestSuite) TestHugeStreamsWithHugePhaseOutputWithoutNewlineDelimiter(c *C) { +func (s *OutputTestSuite) TestHugeStreamsWithHugePhaseOutputWithoutNewlineDelimiter(c *check.C) { done := make(chan struct{}) defer func() { close(done) }() cases := generateTestCases(1, 500000, 10, 500000, EndlineProhibited) r := getTestReaderCloser(done, cases) m, e := output.LogAndParse(context.TODO(), r) - c.Check(e, IsNil) - c.Check(len(m), Equals, 1) - c.Check(m[cases[0].key], Equals, string(cases[0].value)) + c.Check(e, check.IsNil) + c.Check(len(m), check.Equals, 1) + c.Check(m[cases[0].key], check.Equals, string(cases[0].value)) } diff --git a/pkg/param/param_test.go b/pkg/param/param_test.go index 1c41cd4e8b..fc7473cff1 100644 --- a/pkg/param/param_test.go +++ b/pkg/param/param_test.go @@ -26,7 +26,7 @@ import ( osapps "github.com/openshift/api/apps/v1" osversioned "github.com/openshift/client-go/apps/clientset/versioned" osfake 
"github.com/openshift/client-go/apps/clientset/versioned/fake" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -45,7 +45,7 @@ import ( ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type ParamsSuite struct { cli kubernetes.Interface @@ -55,11 +55,11 @@ type ParamsSuite struct { osCli osversioned.Interface } -var _ = Suite(&ParamsSuite{}) +var _ = check.Suite(&ParamsSuite{}) -func (s *ParamsSuite) SetUpSuite(c *C) { +func (s *ParamsSuite) SetUpSuite(c *check.C) { cli, err := kube.NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -67,12 +67,12 @@ func (s *ParamsSuite) SetUpSuite(c *C) { }, } cns, err := s.cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name s.dynCli = fakedyncli.NewSimpleDynamicClient(scheme.Scheme, cns) } -func (s *ParamsSuite) SetUpTest(c *C) { +func (s *ParamsSuite) SetUpTest(c *check.C) { pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "kanisterparamtest-", @@ -87,19 +87,19 @@ func (s *ParamsSuite) SetUpTest(c *C) { }, } cPVC, err := s.cli.CoreV1().PersistentVolumeClaims(s.namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.pvc = cPVC.Name } -func (s *ParamsSuite) TearDownSuite(c *C) { +func (s *ParamsSuite) TearDownSuite(c *check.C) { if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } } -func (s *ParamsSuite) TearDownTest(c *C) { +func (s *ParamsSuite) TearDownTest(c *check.C) { err := s.cli.CoreV1().PersistentVolumeClaims(s.namespace).Delete(context.TODO(), s.pvc, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } const ssSpec = ` @@ -132,19 +132,19 @@ spec: claimName: %s ` -func (s *ParamsSuite) TestFetchStatefulSetParams(c *C) { +func (s *ParamsSuite) TestFetchStatefulSetParams(c *check.C) { ctx := context.Background() name := strings.ToLower(c.TestName()) name = strings.Replace(name, ".", "", 1) spec := fmt.Sprintf(ssSpec, name, name, s.pvc) ss, err := kube.CreateStatefulSet(ctx, s.cli, s.namespace, spec) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnStatefulSetReady(ctx, s.cli, ss.Namespace, ss.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ssp, err := fetchStatefulSetParams(ctx, s.cli, s.namespace, name) - c.Assert(err, IsNil) - c.Assert(ssp, DeepEquals, &StatefulSetParams{ + c.Assert(err, check.IsNil) + c.Assert(ssp, check.DeepEquals, &StatefulSetParams{ Name: name, Namespace: s.namespace, Pods: []string{name + "-0"}, @@ -186,108 +186,108 @@ spec: claimName: %s ` -func (s *ParamsSuite) TestFetchDeploymentParams(c *C) { +func (s *ParamsSuite) TestFetchDeploymentParams(c *check.C) { ctx := context.Background() name := strings.ToLower(c.TestName()) name = strings.Replace(name, ".", "", 1) spec := fmt.Sprintf(deploySpec, name, name, s.pvc) d, err := kube.CreateDeployment(ctx, s.cli, s.namespace, spec) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnDeploymentReady(ctx, s.cli, d.Namespace, d.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) dp, err := fetchDeploymentParams(ctx, s.cli, s.namespace, name) - c.Assert(err, IsNil) - c.Assert(dp.Namespace, 
Equals, s.namespace) - c.Assert(dp.Pods, HasLen, 1) - c.Assert(dp.Containers, DeepEquals, [][]string{{"test-container"}}) - c.Assert(dp.PersistentVolumeClaims, DeepEquals, map[string]map[string]string{ + c.Assert(err, check.IsNil) + c.Assert(dp.Namespace, check.Equals, s.namespace) + c.Assert(dp.Pods, check.HasLen, 1) + c.Assert(dp.Containers, check.DeepEquals, [][]string{{"test-container"}}) + c.Assert(dp.PersistentVolumeClaims, check.DeepEquals, map[string]map[string]string{ dp.Pods[0]: { s.pvc: "/mnt/data/" + name, }, }) } -func (s *ParamsSuite) TestFetchDeploymentConfigParams(c *C) { +func (s *ParamsSuite) TestFetchDeploymentConfigParams(c *check.C) { ok, err := kube.IsOSAppsGroupAvailable(context.Background(), s.cli.Discovery()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) if !ok { c.Skip("Skipping test since this only runs on OpenShift") } cfg, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.osCli, err = osversioned.NewForConfig(cfg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) depConf := newDeploymentConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // create a deploymentconfig ctx := context.Background() dc, err := s.osCli.AppsV1().DeploymentConfigs(s.namespace).Create(ctx, depConf, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // wait for deploymentconfig to be ready err = kube.WaitOnDeploymentConfigReady(ctx, s.osCli, s.cli, dc.Namespace, dc.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // get again achieve optimistic concurrency newDep, err := s.osCli.AppsV1().DeploymentConfigs(s.namespace).Get(ctx, dc.Name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // edit the deploymentconfig newDep.Spec.Template.Spec.Containers[0].Name = "newname" // update the deploymentconfig updatedDC, err := s.osCli.AppsV1().DeploymentConfigs(s.namespace).Update(ctx, newDep, metav1.UpdateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // once updated, it will take some time to new replicationcontroller and pods to be up and running // wait for deploymentconfig to be reay again err = kube.WaitOnDeploymentConfigReady(ctx, s.osCli, s.cli, dc.Namespace, updatedDC.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // fetch the deploymentconfig params dconf, err := fetchDeploymentConfigParams(ctx, s.cli, s.osCli, s.namespace, updatedDC.Name) - c.Assert(err, IsNil) - c.Assert(dconf.Namespace, Equals, s.namespace) - c.Assert(dconf.Pods, HasLen, 1) - c.Assert(dconf.Containers, DeepEquals, [][]string{{"newname"}}) + c.Assert(err, check.IsNil) + c.Assert(dconf.Namespace, check.Equals, s.namespace) + c.Assert(dconf.Pods, check.HasLen, 1) + c.Assert(dconf.Containers, check.DeepEquals, [][]string{{"newname"}}) // let's scale the deployment config and try things dConfig, err := s.osCli.AppsV1().DeploymentConfigs(s.namespace).Get(ctx, dc.Name, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // scale the replicas to 3 dConfig.Spec.Replicas = 3 updated, err := s.osCli.AppsV1().DeploymentConfigs(s.namespace).Update(ctx, dConfig, metav1.UpdateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // wait for deploymentconfig to be ready err = kube.WaitOnDeploymentConfigReady(ctx, s.osCli, s.cli, s.namespace, updated.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // fetch the deploymentconfig params dconfParams, err := fetchDeploymentConfigParams(ctx, s.cli, s.osCli, s.namespace, updated.Name) - c.Assert(err, IsNil) - 
c.Assert(dconfParams.Namespace, Equals, s.namespace) + c.Assert(err, check.IsNil) + c.Assert(dconfParams.Namespace, check.Equals, s.namespace) // number of pods should be chnanged to 3 - c.Assert(dconfParams.Pods, HasLen, 3) + c.Assert(dconfParams.Pods, check.HasLen, 3) } -func (s *ParamsSuite) TestFetchPVCParams(c *C) { +func (s *ParamsSuite) TestFetchPVCParams(c *check.C) { ctx := context.Background() testCases := []struct { name string pvc string - errChecker Checker + errChecker check.Checker }{ - {"Valid", s.pvc, IsNil}, - {"Invalid", "foo-pvc", NotNil}, + {"Valid", s.pvc, check.IsNil}, + {"Invalid", "foo-pvc", check.NotNil}, } for _, tc := range testCases { _, err := fetchPVCParams(ctx, s.cli, s.namespace, tc.pvc) - c.Check(err, tc.errChecker, Commentf("Test %s Failed!", tc.name)) + c.Check(err, tc.errChecker, check.Commentf("Test %s Failed!", tc.name)) } } @@ -300,64 +300,64 @@ data: someKey: some-value ` -func (s *ParamsSuite) TestNewTemplateParamsDeployment(c *C) { +func (s *ParamsSuite) TestNewTemplateParamsDeployment(c *check.C) { ctx := context.Background() name := strings.ToLower(c.TestName()) name = strings.Replace(name, ".", "", 1) spec := fmt.Sprintf(deploySpec, name, name, s.pvc) d, err := kube.CreateDeployment(ctx, s.cli, s.namespace, spec) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnDeploymentReady(ctx, s.cli, d.Namespace, d.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.testNewTemplateParams(ctx, c, s.getDynamicClient(c, d), crv1alpha1.ObjectReference{Name: name, Namespace: s.namespace, Kind: DeploymentKind}) } -func (s *ParamsSuite) TestNewTemplateParamsStatefulSet(c *C) { +func (s *ParamsSuite) TestNewTemplateParamsStatefulSet(c *check.C) { ctx := context.Background() name := strings.ToLower(c.TestName()) name = strings.Replace(name, ".", "", 1) spec := fmt.Sprintf(ssSpec, name, name, s.pvc) ss, err := kube.CreateStatefulSet(ctx, s.cli, s.namespace, spec) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnStatefulSetReady(ctx, s.cli, ss.Namespace, ss.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.testNewTemplateParams(ctx, c, s.getDynamicClient(c, ss), crv1alpha1.ObjectReference{Name: name, Namespace: s.namespace, Kind: StatefulSetKind}) } -func (s *ParamsSuite) TestNewTemplateParamsPVC(c *C) { +func (s *ParamsSuite) TestNewTemplateParamsPVC(c *check.C) { ctx := context.Background() pvc, err := s.cli.CoreV1().PersistentVolumeClaims(s.namespace).Get(context.TODO(), s.pvc, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.testNewTemplateParams(ctx, c, s.getDynamicClient(c, pvc), crv1alpha1.ObjectReference{Name: s.pvc, Namespace: s.namespace, Kind: PVCKind}) } -func (s *ParamsSuite) TestNewTemplateParamsNamespace(c *C) { +func (s *ParamsSuite) TestNewTemplateParamsNamespace(c *check.C) { ctx := context.Background() s.testNewTemplateParams(ctx, c, s.getDynamicClient(c), crv1alpha1.ObjectReference{Name: s.namespace, Namespace: s.namespace, Kind: NamespaceKind}) } -func (s *ParamsSuite) TestNewTemplateParamsUnstructured(c *C) { +func (s *ParamsSuite) TestNewTemplateParamsUnstructured(c *check.C) { ctx := context.Background() // Lookup the "default" serviceaccount in the test namespace sa, err := s.cli.CoreV1().ServiceAccounts(s.namespace).Get(context.TODO(), "default", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.testNewTemplateParams(ctx, c, s.getDynamicClient(c, sa), crv1alpha1.ObjectReference{Name: "default", Namespace: s.namespace, Group: "", 
APIVersion: "v1", Resource: "serviceaccounts"}) } -func (s *ParamsSuite) getDynamicClient(c *C, objects ...runtime.Object) dynamic.Interface { +func (s *ParamsSuite) getDynamicClient(c *check.C, objects ...runtime.Object) dynamic.Interface { ns, err := s.cli.CoreV1().Namespaces().Get(context.TODO(), s.namespace, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) objects = append(objects, ns) return fakedyncli.NewSimpleDynamicClient(scheme.Scheme, objects...) } -func (s *ParamsSuite) testNewTemplateParams(ctx context.Context, c *C, dynCli dynamic.Interface, object crv1alpha1.ObjectReference) { +func (s *ParamsSuite) testNewTemplateParams(ctx context.Context, c *check.C, dynCli dynamic.Interface, object crv1alpha1.ObjectReference) { spec := fmt.Sprintf(cmSpec, object.Name) cm, err := kube.CreateConfigMap(ctx, s.cli, s.namespace, spec) - c.Assert(err, IsNil) - c.Assert(cm, NotNil) + c.Assert(err, check.IsNil) + c.Assert(cm, check.NotNil) secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -388,21 +388,21 @@ func (s *ParamsSuite) testNewTemplateParams(ctx context.Context, c *C, dynCli dy }, } _, err = s.cli.CoreV1().Secrets(s.namespace).Create(ctx, secret, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { _ = s.cli.CoreV1().Secrets(s.namespace).Delete(context.TODO(), "secret-name", metav1.DeleteOptions{}) }() _, err = s.cli.CoreV1().Secrets(s.namespace).Get(ctx, "secret-name", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) osCli := osfake.NewSimpleClientset() crCli := crfake.NewSimpleClientset() _, err = crCli.CrV1alpha1().Profiles(s.namespace).Create(ctx, prof, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = crCli.CrV1alpha1().Profiles(s.namespace).Get(ctx, "profName", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) as := crv1alpha1.ActionSpec{ Object: object, @@ -448,32 +448,32 @@ func (s *ParamsSuite) testNewTemplateParams(ctx context.Context, c *C, dynCli dy artsTpl["objectNameArtifact"] = crv1alpha1.Artifact{KeyValue: map[string]string{"my-key": unstructuredTemplate}} tp, err := New(ctx, s.cli, dynCli, crCli, osCli, as) - c.Assert(err, IsNil) - c.Assert(tp.ConfigMaps["myCM"].Data, DeepEquals, map[string]string{"someKey": "some-value"}) - c.Assert(tp.Options, DeepEquals, map[string]string{"podName": "some-pod"}) + c.Assert(err, check.IsNil) + c.Assert(tp.ConfigMaps["myCM"].Data, check.DeepEquals, map[string]string{"someKey": "some-value"}) + c.Assert(tp.Options, check.DeepEquals, map[string]string{"podName": "some-pod"}) arts, err := RenderArtifacts(artsTpl, *tp) - c.Assert(err, IsNil) - c.Assert(arts["my-art"], DeepEquals, crv1alpha1.Artifact{KeyValue: map[string]string{"my-key": "some-value"}}) + c.Assert(err, check.IsNil) + c.Assert(arts["my-art"], check.DeepEquals, crv1alpha1.Artifact{KeyValue: map[string]string{"my-key": "some-value"}}) _, err = time.Parse(timeFormat, arts["my-time"].KeyValue["my-time"]) - c.Assert(err, IsNil) - c.Assert(arts["kindArtifact"], DeepEquals, crv1alpha1.Artifact{KeyValue: map[string]string{"my-key": object.Name}}) - c.Assert(arts["objectNameArtifact"], DeepEquals, crv1alpha1.Artifact{KeyValue: map[string]string{"my-key": object.Name}}) + c.Assert(err, check.IsNil) + c.Assert(arts["kindArtifact"], check.DeepEquals, crv1alpha1.Artifact{KeyValue: map[string]string{"my-key": object.Name}}) + c.Assert(arts["objectNameArtifact"], check.DeepEquals, crv1alpha1.Artifact{KeyValue: map[string]string{"my-key": 
object.Name}}) } -func (s *ParamsSuite) TestfetchKVSecretCredential(c *C) { +func (s *ParamsSuite) TestfetchKVSecretCredential(c *check.C) { ctx := context.Background() for _, tc := range []struct { secret *corev1.Secret kvs *crv1alpha1.KeyPair - checker Checker + checker check.Checker cred *Credential }{ { secret: &corev1.Secret{}, kvs: &crv1alpha1.KeyPair{}, cred: nil, - checker: NotNil, + checker: check.NotNil, }, { secret: &corev1.Secret{ @@ -494,17 +494,17 @@ func (s *ParamsSuite) TestfetchKVSecretCredential(c *C) { Secret: "bar", }, }, - checker: IsNil, + checker: check.IsNil, }, } { cli := fake.NewSimpleClientset(tc.secret) cred, err := fetchKeyPairCredential(ctx, cli, tc.kvs) c.Assert(err, tc.checker) - c.Assert(cred, DeepEquals, tc.cred) + c.Assert(cred, check.DeepEquals, tc.cred) } } -func (s *ParamsSuite) TestProfile(c *C) { +func (s *ParamsSuite) TestProfile(c *check.C) { ss := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: "ssName", @@ -534,11 +534,11 @@ func (s *ParamsSuite) TestProfile(c *C) { cli := fake.NewSimpleClientset(ss, pod, secret) dynCli := fakedyncli.NewSimpleDynamicClient(scheme.Scheme, ss) _, err := cli.AppsV1().StatefulSets("").List(ctx, metav1.ListOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = cli.CoreV1().Pods("").List(ctx, metav1.ListOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = cli.CoreV1().Secrets("").List(ctx, metav1.ListOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) prof := &crv1alpha1.Profile{ ObjectMeta: metav1.ObjectMeta{ @@ -581,20 +581,20 @@ func (s *ParamsSuite) TestProfile(c *C) { } crCli := crfake.NewSimpleClientset() _, err = crCli.CrV1alpha1().ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = crCli.CrV1alpha1().ActionSets(s.namespace).List(ctx, metav1.ListOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = crCli.CrV1alpha1().Profiles(s.namespace).Create(ctx, prof, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = crCli.CrV1alpha1().Profiles(s.namespace).List(ctx, metav1.ListOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) osCli := osfake.NewSimpleClientset() tp, err := New(ctx, cli, dynCli, crCli, osCli, as.Spec.Actions[0]) - c.Assert(err, IsNil) - c.Assert(tp.Profile, NotNil) - c.Assert(tp.Profile, DeepEquals, &Profile{ + c.Assert(err, check.IsNil) + c.Assert(tp.Profile, check.NotNil) + c.Assert(tp.Profile, check.DeepEquals, &Profile{ Location: crv1alpha1.Location{}, Credential: Credential{ Type: CredentialTypeKeyPair, @@ -606,7 +606,7 @@ func (s *ParamsSuite) TestProfile(c *C) { }) } -func (s *ParamsSuite) TestParamsWithoutProfile(c *C) { +func (s *ParamsSuite) TestParamsWithoutProfile(c *check.C) { ctx := context.Background() secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -620,16 +620,16 @@ func (s *ParamsSuite) TestParamsWithoutProfile(c *C) { }, } secret, err := s.cli.CoreV1().Secrets(s.namespace).Create(ctx, secret, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { _ = s.cli.CoreV1().Secrets(s.namespace).Delete(context.TODO(), "secret-name", metav1.DeleteOptions{}) }() _, err = s.cli.CoreV1().Secrets(s.namespace).Get(ctx, "secret-name", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pvc, err := s.cli.CoreV1().PersistentVolumeClaims(s.namespace).Get(ctx, s.pvc, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) dynCli := 
s.getDynamicClient(c, pvc) crCli := crfake.NewSimpleClientset() osCli := osfake.NewSimpleClientset() @@ -647,11 +647,11 @@ func (s *ParamsSuite) TestParamsWithoutProfile(c *C) { }, } tp, err := New(ctx, s.cli, dynCli, crCli, osCli, as) - c.Assert(err, IsNil) - c.Assert(tp, NotNil) + c.Assert(err, check.IsNil) + c.Assert(tp, check.NotNil) } -func (s *ParamsSuite) TestPhaseParams(c *C) { +func (s *ParamsSuite) TestPhaseParams(c *check.C) { ctx := context.Background() secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -682,23 +682,23 @@ func (s *ParamsSuite) TestPhaseParams(c *C) { }, } secret, err := s.cli.CoreV1().Secrets(s.namespace).Create(ctx, secret, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { _ = s.cli.CoreV1().Secrets(s.namespace).Delete(context.TODO(), "secret-name", metav1.DeleteOptions{}) }() _, err = s.cli.CoreV1().Secrets(s.namespace).Get(ctx, "secret-name", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pvc, err := s.cli.CoreV1().PersistentVolumeClaims(s.namespace).Get(ctx, s.pvc, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) dynCli := s.getDynamicClient(c, pvc) crCli := crfake.NewSimpleClientset() osCli := osfake.NewSimpleClientset() _, err = crCli.CrV1alpha1().Profiles(s.namespace).Create(ctx, prof, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = crCli.CrV1alpha1().Profiles(s.namespace).Get(ctx, "profName", metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) as := crv1alpha1.ActionSpec{ Object: crv1alpha1.ObjectReference{ Name: s.pvc, @@ -717,21 +717,21 @@ func (s *ParamsSuite) TestPhaseParams(c *C) { }, } tp, err := New(ctx, s.cli, dynCli, crCli, osCli, as) - c.Assert(err, IsNil) - c.Assert(tp.Phases, IsNil) + c.Assert(err, check.IsNil) + c.Assert(tp.Phases, check.IsNil) err = InitPhaseParams(ctx, s.cli, tp, "backup", nil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) UpdatePhaseParams(ctx, tp, "backup", map[string]interface{}{"version": "0.75.0"}) UpdateDeferPhaseParams(ctx, tp, map[string]interface{}{"version": "0.75.0"}) // make sure output artifact is set in DeferPhase - c.Assert(tp.DeferPhase.Output, DeepEquals, map[string]interface{}{"version": "0.75.0"}) - c.Assert(tp.Phases, HasLen, 1) - c.Assert(tp.Phases["backup"], NotNil) - c.Assert(tp.Secrets, HasLen, 1) - c.Assert(tp.Secrets["actionSetSecret"], DeepEquals, *secret) + c.Assert(tp.DeferPhase.Output, check.DeepEquals, map[string]interface{}{"version": "0.75.0"}) + c.Assert(tp.Phases, check.HasLen, 1) + c.Assert(tp.Phases["backup"], check.NotNil) + c.Assert(tp.Secrets, check.HasLen, 1) + c.Assert(tp.Secrets["actionSetSecret"], check.DeepEquals, *secret) } -func (s *ParamsSuite) TestRenderingPhaseParams(c *C) { +func (s *ParamsSuite) TestRenderingPhaseParams(c *check.C) { ctx := context.Background() secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -753,7 +753,7 @@ func (s *ParamsSuite) TestRenderingPhaseParams(c *C) { } tp := TemplateParams{} err := InitPhaseParams(ctx, cli, &tp, "backup", secretRef) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) UpdatePhaseParams(ctx, &tp, "backup", map[string]interface{}{"replicas": 2}) for _, tc := range []struct { arg string @@ -773,11 +773,11 @@ func (s *ParamsSuite) TestRenderingPhaseParams(c *C) { }, } { t, err := template.New("config").Option("missingkey=error").Funcs(ksprig.TxtFuncMap()).Parse(tc.arg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) buf := bytes.NewBuffer(nil) err = 
t.Execute(buf, tp) - c.Assert(err, IsNil) - c.Assert(buf.String(), Equals, tc.expected) + c.Assert(err, check.IsNil) + c.Assert(buf.String(), check.Equals, tc.expected) } } diff --git a/pkg/param/render_test.go b/pkg/param/render_test.go index 363539210c..e258afd1a6 100644 --- a/pkg/param/render_test.go +++ b/pkg/param/render_test.go @@ -17,39 +17,39 @@ package param import ( "time" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" ) type RenderSuite struct{} -var _ = Suite(&RenderSuite{}) +var _ = check.Suite(&RenderSuite{}) -func (s *RenderSuite) TestRender(c *C) { +func (s *RenderSuite) TestRender(c *check.C) { for _, tc := range []struct { arg interface{} tp TemplateParams out interface{} - checker Checker + checker check.Checker }{ { arg: "", tp: TemplateParams{}, out: "", - checker: IsNil, + checker: check.IsNil, }, { arg: "hello", tp: TemplateParams{}, out: "hello", - checker: IsNil, + checker: check.IsNil, }, { arg: "-", tp: TemplateParams{}, out: "-", - checker: IsNil, + checker: check.IsNil, }, { arg: "{{ .Options.hello }}", @@ -59,7 +59,7 @@ func (s *RenderSuite) TestRender(c *C) { }, }, out: "", - checker: IsNil, + checker: check.IsNil, }, { arg: "{{ .Options.hello }}", @@ -69,7 +69,7 @@ func (s *RenderSuite) TestRender(c *C) { }, }, out: "someValue", - checker: IsNil, + checker: check.IsNil, }, { // `-` cannot be used in a template path. @@ -80,7 +80,7 @@ func (s *RenderSuite) TestRender(c *C) { }, }, out: "", - checker: NotNil, + checker: check.NotNil, }, { // `-` can exist in artifact keys, it just cannot be used in path. @@ -92,31 +92,31 @@ func (s *RenderSuite) TestRender(c *C) { }, }, out: "someValue", - checker: IsNil, + checker: check.IsNil, }, { arg: "{{ upper `hello` }}", tp: TemplateParams{}, out: "HELLO", - checker: IsNil, + checker: check.IsNil, }, { arg: []string{"{{ upper `hello` }}"}, tp: TemplateParams{}, out: []interface{}{"HELLO"}, - checker: IsNil, + checker: check.IsNil, }, { arg: map[string]string{"name": "{{ upper `hello` }}"}, tp: TemplateParams{}, out: map[interface{}]interface{}{"name": "HELLO"}, - checker: IsNil, + checker: check.IsNil, }, { arg: map[string][]string{"{{ upper `hello` }}": {"{{ upper `hello` }}"}}, tp: TemplateParams{}, out: map[interface{}]interface{}{"HELLO": []interface{}{"HELLO"}}, - checker: IsNil, + checker: check.IsNil, }, { // Render should fail if referenced key doesn't exist @@ -124,19 +124,19 @@ func (s *RenderSuite) TestRender(c *C) { tp: TemplateParams{ Options: map[string]string{}, }, - checker: NotNil, + checker: check.NotNil, }, } { inArgs := map[string]interface{}{"arg": tc.arg} out, err := RenderArgs(inArgs, tc.tp) c.Assert(err, tc.checker) if err == nil { - c.Assert(out["arg"], DeepEquals, tc.out) + c.Assert(out["arg"], check.DeepEquals, tc.out) } } } -func (s *RenderSuite) TestRenderObjects(c *C) { +func (s *RenderSuite) TestRenderObjects(c *check.C) { tp := TemplateParams{ Time: time.Now().String(), Object: map[string]interface{}{ @@ -150,11 +150,11 @@ func (s *RenderSuite) TestRenderObjects(c *C) { }, } out, err := RenderObjectRefs(in, tp) - c.Assert(err, IsNil) - c.Assert(out["authSecret"].Name, Equals, "secret-name") + c.Assert(err, check.IsNil) + c.Assert(out["authSecret"].Name, check.Equals, "secret-name") } -func (s *RenderSuite) TestRenderArtifacts(c *C) { +func (s *RenderSuite) TestRenderArtifacts(c *check.C) { tp := TemplateParams{ Phases: map[string]*Phase{ "myPhase": { @@ -169,7 +169,7 @@ func (s *RenderSuite) TestRenderArtifacts(c *C) { art 
map[string]crv1alpha1.Artifact tp TemplateParams out map[string]crv1alpha1.Artifact - checker Checker + checker check.Checker }{ { art: map[string]crv1alpha1.Artifact{ @@ -183,7 +183,7 @@ func (s *RenderSuite) TestRenderArtifacts(c *C) { KopiaSnapshot: "a-snapshot-id", }, }, - checker: IsNil, + checker: check.IsNil, }, { @@ -202,11 +202,11 @@ func (s *RenderSuite) TestRenderArtifacts(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, } { ra, err := RenderArtifacts(tc.art, tc.tp) c.Assert(err, tc.checker) - c.Assert(ra, DeepEquals, tc.out) + c.Assert(ra, check.DeepEquals, tc.out) } } diff --git a/pkg/phase_test.go b/pkg/phase_test.go index c91fdb48ab..84316b178a 100644 --- a/pkg/phase_test.go +++ b/pkg/phase_test.go @@ -17,7 +17,7 @@ package kanister import ( "context" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" "github.com/kanisterio/kanister/pkg/param" @@ -27,7 +27,7 @@ import ( type PhaseSuite struct{} var ( - _ = Suite(&PhaseSuite{}) + _ = check.Suite(&PhaseSuite{}) _ Func = (*testFunc)(nil) ) @@ -77,7 +77,7 @@ func (tf *testFunc) ExecutionProgress() (crv1alpha1.PhaseProgress, error) { return crv1alpha1.PhaseProgress{ProgressPercent: tf.progressPercent}, nil } -func (s *PhaseSuite) TestExec(c *C) { +func (s *PhaseSuite) TestExec(c *check.C) { for _, tc := range []struct { artifact string argument string @@ -105,19 +105,19 @@ func (s *PhaseSuite) TestExec(c *C) { "testKey": tc.argument, } args, err := param.RenderArgs(rawArgs, tp) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p := Phase{args: args, f: tf} _, err = p.Exec(context.Background(), crv1alpha1.Blueprint{}, "", tp) - c.Assert(err, IsNil) - c.Assert(output, Equals, tc.expected) + c.Assert(err, check.IsNil) + c.Assert(output, check.Equals, tc.expected) } } -func (s *PhaseSuite) TestCheckSupportedArgs(c *C) { +func (s *PhaseSuite) TestCheckSupportedArgs(c *check.C) { for _, tc := range []struct { supprtedArgs []string providedArgs map[string]interface{} - err Checker + err check.Checker expErr string }{ { @@ -127,7 +127,7 @@ func (s *PhaseSuite) TestCheckSupportedArgs(c *C) { "b": "val", "c": "val", }, - err: IsNil, + err: check.IsNil, }, { supprtedArgs: []string{"a", "b", "c"}, @@ -137,24 +137,24 @@ func (s *PhaseSuite) TestCheckSupportedArgs(c *C) { "c": "val", "d": "val", }, - err: NotNil, + err: check.NotNil, expErr: "argument d is not supported", }, { supprtedArgs: []string{"a", "b", "c"}, providedArgs: map[string]interface{}{}, - err: IsNil, + err: check.IsNil, }, } { err := utils.CheckSupportedArgs(tc.supprtedArgs, tc.providedArgs) if err != nil { - c.Assert(err.Error(), Equals, tc.expErr) + c.Assert(err.Error(), check.Equals, tc.expErr) } c.Assert(err, tc.err) } } -func (s *PhaseSuite) TestRegFuncVersion(c *C) { +func (s *PhaseSuite) TestRegFuncVersion(c *check.C) { for _, tc := range []struct { regWithVersion string expectedVersion string @@ -208,14 +208,14 @@ func (s *PhaseSuite) TestRegFuncVersion(c *C) { } { if tc.regWithVersion == "" { err := Register(tc.f) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } else { err := RegisterVersion(tc.f, tc.regWithVersion) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } semVer, err := regFuncVersion(tc.f.Name(), tc.queryVersion) - c.Assert(err, IsNil) - c.Assert(semVer.Original(), Equals, tc.expectedVersion) + c.Assert(err, check.IsNil) + c.Assert(semVer.Original(), check.Equals, tc.expectedVersion) } } diff --git a/pkg/poll/poll_test.go b/pkg/poll/poll_test.go index 4a4983e570..60dd382f29 
100644 --- a/pkg/poll/poll_test.go +++ b/pkg/poll/poll_test.go @@ -23,18 +23,18 @@ import ( "time" "github.com/jpillora/backoff" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type PollSuite struct{} -var _ = Suite(&PollSuite{}) +var _ = check.Suite(&PollSuite{}) type mockPollFunc struct { - c *C + c *check.C res []pollFuncResult } @@ -58,10 +58,10 @@ func (mpf *mockPollFunc) Run(ctx context.Context) (bool, error) { var errFake = fmt.Errorf("THIS IS FAKE") -func (s *PollSuite) TestWaitWithBackoff(c *C) { +func (s *PollSuite) TestWaitWithBackoff(c *check.C) { for _, tc := range []struct { f mockPollFunc - checker Checker + checker check.Checker }{ { f: mockPollFunc{ @@ -70,7 +70,7 @@ func (s *PollSuite) TestWaitWithBackoff(c *C) { {ok: true, err: nil}, }, }, - checker: IsNil, + checker: check.IsNil, }, { f: mockPollFunc{ @@ -79,7 +79,7 @@ func (s *PollSuite) TestWaitWithBackoff(c *C) { {ok: false, err: errFake}, }, }, - checker: NotNil, + checker: check.NotNil, }, { f: mockPollFunc{ @@ -88,7 +88,7 @@ func (s *PollSuite) TestWaitWithBackoff(c *C) { {ok: true, err: errFake}, }, }, - checker: NotNil, + checker: check.NotNil, }, { f: mockPollFunc{ @@ -98,7 +98,7 @@ func (s *PollSuite) TestWaitWithBackoff(c *C) { {ok: true, err: nil}, }, }, - checker: IsNil, + checker: check.IsNil, }, { f: mockPollFunc{ @@ -108,7 +108,7 @@ func (s *PollSuite) TestWaitWithBackoff(c *C) { {ok: true, err: errFake}, }, }, - checker: NotNil, + checker: check.NotNil, }, } { ctx := context.Background() @@ -118,7 +118,7 @@ func (s *PollSuite) TestWaitWithBackoff(c *C) { } } -func (s *PollSuite) TestWaitWithBackoffCancellation(c *C) { +func (s *PollSuite) TestWaitWithBackoffCancellation(c *check.C) { f := func(context.Context) (bool, error) { return false, nil } @@ -129,10 +129,10 @@ func (s *PollSuite) TestWaitWithBackoffCancellation(c *C) { b := backoff.Backoff{} err := WaitWithBackoff(ctx, b, f) - c.Check(err, NotNil) + c.Check(err, check.NotNil) } -func (s *PollSuite) TestWaitWithRetriesTimeout(c *C) { +func (s *PollSuite) TestWaitWithRetriesTimeout(c *check.C) { // There's a better chance of catching a race condition // if there is only one thread maxprocs := runtime.GOMAXPROCS(1) @@ -152,11 +152,11 @@ func (s *PollSuite) TestWaitWithRetriesTimeout(c *C) { backoff := backoff.Backoff{} backoff.Min = 2 * time.Millisecond err := WaitWithBackoffWithRetries(ctx, backoff, 10, errf, f) - c.Check(err, NotNil) - c.Assert(err.Error(), Matches, ".*context deadline exceeded*") + c.Check(err, check.NotNil) + c.Assert(err.Error(), check.Matches, ".*context deadline exceeded*") } -func (s *PollSuite) TestWaitWithBackoffBackoff(c *C) { +func (s *PollSuite) TestWaitWithBackoffBackoff(c *check.C) { const numIterations = 10 i := 0 f := func(context.Context) (bool, error) { @@ -174,6 +174,6 @@ func (s *PollSuite) TestWaitWithBackoffBackoff(c *C) { now := time.Now() err := WaitWithBackoff(ctx, b, f) - c.Assert(err, IsNil) - c.Assert(time.Since(now) > (numIterations-1)*time.Millisecond, Equals, true) + c.Assert(err, check.IsNil) + c.Assert(time.Since(now) > (numIterations-1)*time.Millisecond, check.Equals, true) } diff --git a/pkg/progress/action_multi_actions_test.go b/pkg/progress/action_multi_actions_test.go index 03338b3464..86260d07f0 100644 --- a/pkg/progress/action_multi_actions_test.go +++ b/pkg/progress/action_multi_actions_test.go @@ -3,7 +3,7 @@ package progress import ( "context" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -16,9 +16,9 @@ type TestSuiteMultiActions struct { clientset *fake.Clientset } -var _ = Suite(&TestSuiteMultiActions{}) +var _ = check.Suite(&TestSuiteMultiActions{}) -func (s *TestSuiteMultiActions) SetUpTest(c *C) { +func (s *TestSuiteMultiActions) SetUpTest(c *check.C) { mockBlueprint := &crv1alpha1.Blueprint{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ @@ -118,21 +118,21 @@ func (s *TestSuiteMultiActions) SetUpTest(c *C) { s.clientset = fake.NewSimpleClientset() err := s.createFixtures(mockBlueprint, mockActionSet) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *TestSuiteMultiActions) TearDownTest(c *C) { +func (s *TestSuiteMultiActions) TearDownTest(c *check.C) { blueprintErr := s.clientset.CrV1alpha1().Blueprints(s.blueprint.GetNamespace()).Delete( context.Background(), s.blueprint.GetName(), metav1.DeleteOptions{}) - c.Assert(blueprintErr, IsNil) + c.Assert(blueprintErr, check.IsNil) actionSetErr := s.clientset.CrV1alpha1().ActionSets(s.actionSet.GetNamespace()).Delete( context.Background(), s.actionSet.GetName(), metav1.DeleteOptions{}) - c.Assert(actionSetErr, IsNil) + c.Assert(actionSetErr, check.IsNil) } func (s *TestSuiteMultiActions) createFixtures(blueprint *crv1alpha1.Blueprint, actionSet *crv1alpha1.ActionSet) error { @@ -157,7 +157,7 @@ func (s *TestSuiteMultiActions) createFixtures(blueprint *crv1alpha1.Blueprint, return nil } -func (s *TestSuiteMultiActions) TestUpdateActionsProgress(c *C) { +func (s *TestSuiteMultiActions) TestUpdateActionsProgress(c *check.C) { // This test simulates ActionSet consisting of two actions with two phases in each var testCases = []struct { indexAction int @@ -358,7 +358,7 @@ func (s *TestSuiteMultiActions) TestUpdateActionsProgress(c *C) { for id, tc := range testCases { // Get latest rev of actionset resource as, err := s.clientset.CrV1alpha1().ActionSets(s.actionSet.GetNamespace()).Get(context.Background(), s.actionSet.GetName(), metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) assertActionProgress( c, s.clientset, @@ -377,7 +377,7 @@ func (s *TestSuiteMultiActions) TestUpdateActionsProgress(c *C) { } } -func (s *TestSuiteMultiActions) TestUpdateActionsProgressWithFailures(c *C) { +func (s *TestSuiteMultiActions) TestUpdateActionsProgressWithFailures(c *check.C) { var testCases = []struct { indexAction int indexPhase int @@ -455,7 +455,7 @@ func (s *TestSuiteMultiActions) TestUpdateActionsProgressWithFailures(c *C) { for id, tc := range testCases { // Get latest rev of actionset resource as, err := s.clientset.CrV1alpha1().ActionSets(s.actionSet.GetNamespace()).Get(context.Background(), s.actionSet.GetName(), metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) assertActionProgress( c, s.clientset, diff --git a/pkg/progress/action_multi_phases_test.go b/pkg/progress/action_multi_phases_test.go index 80a69d3605..4cb4970b3f 100644 --- a/pkg/progress/action_multi_phases_test.go +++ b/pkg/progress/action_multi_phases_test.go @@ -3,7 +3,7 @@ package progress import ( "context" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -16,9 +16,9 @@ type TestSuiteMultiPhases struct { clientset *fake.Clientset } -var _ = Suite(&TestSuiteMultiPhases{}) +var _ = check.Suite(&TestSuiteMultiPhases{}) -func (s *TestSuiteMultiPhases) SetUpTest(c *C) { +func (s *TestSuiteMultiPhases) SetUpTest(c *check.C) { mockBlueprint := &crv1alpha1.Blueprint{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ @@ -83,21 +83,21 @@ func (s *TestSuiteMultiPhases) SetUpTest(c *C) { s.clientset = fake.NewSimpleClientset() err := s.createFixtures(mockBlueprint, mockActionSet) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *TestSuiteMultiPhases) TearDownTest(c *C) { +func (s *TestSuiteMultiPhases) TearDownTest(c *check.C) { blueprintErr := s.clientset.CrV1alpha1().Blueprints(s.blueprint.GetNamespace()).Delete( context.Background(), s.blueprint.GetName(), metav1.DeleteOptions{}) - c.Assert(blueprintErr, IsNil) + c.Assert(blueprintErr, check.IsNil) actionSetErr := s.clientset.CrV1alpha1().ActionSets(s.actionSet.GetNamespace()).Delete( context.Background(), s.actionSet.GetName(), metav1.DeleteOptions{}) - c.Assert(actionSetErr, IsNil) + c.Assert(actionSetErr, check.IsNil) } func (s *TestSuiteMultiPhases) createFixtures(blueprint *crv1alpha1.Blueprint, actionSet *crv1alpha1.ActionSet) error { @@ -122,7 +122,7 @@ func (s *TestSuiteMultiPhases) createFixtures(blueprint *crv1alpha1.Blueprint, a return nil } -func (s *TestSuiteMultiPhases) TestUpdateActionsProgress(c *C) { +func (s *TestSuiteMultiPhases) TestUpdateActionsProgress(c *check.C) { var testCases = []struct { indexAction int indexPhase int @@ -222,7 +222,7 @@ func (s *TestSuiteMultiPhases) TestUpdateActionsProgress(c *C) { for id, tc := range testCases { // Get latest rev of actionset resource as, err := s.clientset.CrV1alpha1().ActionSets(s.actionSet.GetNamespace()).Get(context.Background(), s.actionSet.GetName(), metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) assertActionProgress( c, s.clientset, @@ -241,7 +241,7 @@ func (s *TestSuiteMultiPhases) TestUpdateActionsProgress(c *C) { } } -func (s *TestSuiteMultiPhases) TestUpdateActionsProgressWithFailures(c *C) { +func (s *TestSuiteMultiPhases) TestUpdateActionsProgressWithFailures(c *check.C) { var testCases = []struct { indexAction int indexPhase int @@ -289,7 +289,7 @@ func (s *TestSuiteMultiPhases) TestUpdateActionsProgressWithFailures(c *C) { for id, tc := range testCases { // Get latest rev of actionset resource as, err := s.clientset.CrV1alpha1().ActionSets(s.actionSet.GetNamespace()).Get(context.Background(), s.actionSet.GetName(), metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) assertActionProgress( c, s.clientset, diff --git a/pkg/progress/action_single_phase_test.go b/pkg/progress/action_single_phase_test.go index 64dbcc21a3..ccd4a1e222 100644 --- a/pkg/progress/action_single_phase_test.go +++ b/pkg/progress/action_single_phase_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -20,7 +20,7 @@ const ( ) func Test(t *testing.T) { - TestingT(t) + check.TestingT(t) } type TestSuiteSinglePhase struct { @@ -29,9 +29,9 @@ type TestSuiteSinglePhase struct { clientset *fake.Clientset } -var _ = Suite(&TestSuiteSinglePhase{}) +var _ = check.Suite(&TestSuiteSinglePhase{}) -func (s *TestSuiteSinglePhase) SetUpTest(c *C) { +func (s *TestSuiteSinglePhase) SetUpTest(c *check.C) { mockBlueprint := &crv1alpha1.Blueprint{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ @@ -88,21 +88,21 @@ func (s *TestSuiteSinglePhase) SetUpTest(c *C) { s.clientset = fake.NewSimpleClientset() err := s.createFixtures(mockBlueprint, mockActionSet) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *TestSuiteSinglePhase) TearDownTest(c *C) { +func (s *TestSuiteSinglePhase) TearDownTest(c *check.C) { blueprintErr := s.clientset.CrV1alpha1().Blueprints(s.blueprint.GetNamespace()).Delete( context.Background(), s.blueprint.GetName(), metav1.DeleteOptions{}) - c.Assert(blueprintErr, IsNil) + c.Assert(blueprintErr, check.IsNil) actionSetErr := s.clientset.CrV1alpha1().ActionSets(s.actionSet.GetNamespace()).Delete( context.Background(), s.actionSet.GetName(), metav1.DeleteOptions{}) - c.Assert(actionSetErr, IsNil) + c.Assert(actionSetErr, check.IsNil) } func (s *TestSuiteSinglePhase) createFixtures(blueprint *crv1alpha1.Blueprint, actionSet *crv1alpha1.ActionSet) error { @@ -127,7 +127,7 @@ func (s *TestSuiteSinglePhase) createFixtures(blueprint *crv1alpha1.Blueprint, a return nil } -func (s *TestSuiteSinglePhase) TestUpdateActionPhaseProgress(c *C) { +func (s *TestSuiteSinglePhase) TestUpdateActionPhaseProgress(c *check.C) { var testCases = []struct { indexAction int indexPhase int @@ -212,7 +212,7 @@ func (s *TestSuiteSinglePhase) TestUpdateActionPhaseProgress(c *C) { } func assertActionProgress( - c *C, + c *check.C, clientset versioned.Interface, actionSet *crv1alpha1.ActionSet, indexAction int, @@ -230,24 +230,24 @@ func assertActionProgress( now := metav1.Now() actionSet.Status.Actions[indexAction].Phases[indexPhase].State = phaseState updated, err := clientset.CrV1alpha1().ActionSets(actionSet.GetNamespace()).Update(context.Background(), actionSet, metav1.UpdateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) phaseName := fmt.Sprintf("echo-hello-%d-%d", indexAction, indexPhase) phaseProgress.LastTransitionTime = &now err1 := updateActionSetStatus(context.Background(), clientset, actionSet, phaseName, phaseProgress) - c.Assert(err1, IsNil, Commentf("test case #: %d", testCaseID)) + c.Assert(err1, check.IsNil, check.Commentf("test case #: %d", testCaseID)) actual, err := clientset.CrV1alpha1().ActionSets(actionSet.GetNamespace()).Get(context.Background(), updated.GetName(), metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check phase progress percent - c.Assert(actual.Status.Actions[indexAction].Phases[indexPhase].Progress.ProgressPercent, Equals, expectedPhasePercent, Commentf("test case #: %d", testCaseID)) + c.Assert(actual.Status.Actions[indexAction].Phases[indexPhase].Progress.ProgressPercent, check.Equals, expectedPhasePercent, check.Commentf("test case #: %d", testCaseID)) // Check action progress percent - c.Assert(actual.Status.Progress.PercentCompleted, Equals, expectedActionPercent, Commentf("test case #: %d", testCaseID)) - c.Assert(actual.Status.Progress.SizeDownloadedB, 
Equals, expectedSizeDownloadedB, Commentf("test case #: %d", testCaseID)) - c.Assert(actual.Status.Progress.SizeUploadedB, Equals, expectedSizeUploadedB, Commentf("test case #: %d", testCaseID)) - c.Assert(actual.Status.Progress.EstimatedDownloadSizeB, Equals, expectedEstimatedDownloadSizeB, Commentf("test case #: %d", testCaseID)) - c.Assert(actual.Status.Progress.EstimatedUploadSizeB, Equals, expectedEstimatedUploadSizeB, Commentf("test case #: %d", testCaseID)) + c.Assert(actual.Status.Progress.PercentCompleted, check.Equals, expectedActionPercent, check.Commentf("test case #: %d", testCaseID)) + c.Assert(actual.Status.Progress.SizeDownloadedB, check.Equals, expectedSizeDownloadedB, check.Commentf("test case #: %d", testCaseID)) + c.Assert(actual.Status.Progress.SizeUploadedB, check.Equals, expectedSizeUploadedB, check.Commentf("test case #: %d", testCaseID)) + c.Assert(actual.Status.Progress.EstimatedDownloadSizeB, check.Equals, expectedEstimatedDownloadSizeB, check.Commentf("test case #: %d", testCaseID)) + c.Assert(actual.Status.Progress.EstimatedUploadSizeB, check.Equals, expectedEstimatedUploadSizeB, check.Commentf("test case #: %d", testCaseID)) if phaseState != crv1alpha1.StateFailed && phaseState != crv1alpha1.StatePending { - c.Assert(actual.Status.Actions[indexAction].Phases[indexPhase].Progress.LastTransitionTime, NotNil) - c.Assert(*actual.Status.Actions[indexAction].Phases[indexPhase].Progress.LastTransitionTime, Equals, now, Commentf("test case #: %d", testCaseID)) + c.Assert(actual.Status.Actions[indexAction].Phases[indexPhase].Progress.LastTransitionTime, check.NotNil) + c.Assert(*actual.Status.Actions[indexAction].Phases[indexPhase].Progress.LastTransitionTime, check.Equals, now, check.Commentf("test case #: %d", testCaseID)) } } diff --git a/pkg/reconcile/reconcile_test.go b/pkg/reconcile/reconcile_test.go index 657e410e6c..0598307ef5 100644 --- a/pkg/reconcile/reconcile_test.go +++ b/pkg/reconcile/reconcile_test.go @@ -19,7 +19,7 @@ import ( "sync" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -31,7 +31,7 @@ import ( ) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type ReconcileSuite struct { cli kubernetes.Interface @@ -40,18 +40,18 @@ type ReconcileSuite struct { as *crv1alpha1.ActionSet } -var _ = Suite(&ReconcileSuite{}) +var _ = check.Suite(&ReconcileSuite{}) -func (s *ReconcileSuite) SetUpSuite(c *C) { +func (s *ReconcileSuite) SetUpSuite(c *check.C) { // Setup Clients config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := kubernetes.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli crCli, err := crclientv1alpha1.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.crCli = crCli // Create Namespace @@ -61,7 +61,7 @@ func (s *ReconcileSuite) SetUpSuite(c *C) { }, } cns, err := cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name // Create ActionSet @@ -96,35 +96,35 @@ func (s *ReconcileSuite) SetUpSuite(c *C) { }, } as, err = s.crCli.ActionSets(s.namespace).Create(context.TODO(), as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.as = as } -func (s *ReconcileSuite) TearDownSuite(c *C) { +func (s *ReconcileSuite) TearDownSuite(c *check.C) { if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } } -func (s *ReconcileSuite) TestSetFailed(c *C) { +func (s *ReconcileSuite) TestSetFailed(c *check.C) { ctx := context.Background() err := ActionSet(ctx, s.crCli, s.namespace, s.as.GetName(), func(as *crv1alpha1.ActionSet) error { as.Status.Actions[0].Phases[0].State = crv1alpha1.StateFailed as.Status.State = crv1alpha1.StateFailed return nil }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) as, err := s.crCli.ActionSets(s.namespace).Get(ctx, s.as.GetName(), metav1.GetOptions{}) - c.Assert(err, IsNil) - c.Assert(as.Status.State, Equals, crv1alpha1.StateFailed) + c.Assert(err, check.IsNil) + c.Assert(as.Status.State, check.Equals, crv1alpha1.StateFailed) } // Tested with 30, but it took 20 seconds to run. This takes 2 seconds and we // still see conflicts. const numParallel = 5 -func (s *ReconcileSuite) TestHandleConflict(c *C) { +func (s *ReconcileSuite) TestHandleConflict(c *check.C) { ctx := context.Background() wg := sync.WaitGroup{} for range make([]struct{}, numParallel) { @@ -136,7 +136,7 @@ func (s *ReconcileSuite) TestHandleConflict(c *C) { as.Status.State = crv1alpha1.StateFailed return nil }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() } wg.Wait() diff --git a/pkg/resource/resource_test.go b/pkg/resource/resource_test.go index fc7959d42c..10e403c6c7 100644 --- a/pkg/resource/resource_test.go +++ b/pkg/resource/resource_test.go @@ -18,7 +18,7 @@ import ( "context" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -29,18 +29,18 @@ import ( ) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type ResourceSuite struct { cli kubernetes.Interface namespace string } -var _ = Suite(&ResourceSuite{}) +var _ = check.Suite(&ResourceSuite{}) -func (s *ResourceSuite) SetUpSuite(c *C) { +func (s *ResourceSuite) SetUpSuite(c *check.C) { cli, err := kube.NewClient() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli = cli ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -48,11 +48,11 @@ func (s *ResourceSuite) SetUpSuite(c *C) { }, } cns, err := s.cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.Name } -func (s *ResourceSuite) TearDownSuite(c *C) { +func (s *ResourceSuite) TearDownSuite(c *check.C) { if s.namespace != "" { _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) } @@ -60,87 +60,87 @@ func (s *ResourceSuite) TearDownSuite(c *C) { var emptyGetOptions metav1.GetOptions -func (s *ResourceSuite) TestActionSetClient(c *C) { +func (s *ResourceSuite) TestActionSetClient(c *check.C) { ctx := context.Background() config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = CreateCustomResources(ctx, config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) name := "testactionset" cli, err := crclientv1alpha1.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) as := &crv1alpha1.ActionSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, } as1, err := cli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) - c.Assert(as, NotNil) + c.Assert(err, check.IsNil) + c.Assert(as, check.NotNil) as2, err := cli.ActionSets(s.namespace).Get(ctx, name, emptyGetOptions) - c.Assert(err, IsNil) - c.Assert(as1, DeepEquals, as2) + c.Assert(err, check.IsNil) + c.Assert(as1, check.DeepEquals, as2) as2.Spec = &crv1alpha1.ActionSetSpec{} as3, err := cli.ActionSets(s.namespace).Update(ctx, as2, metav1.UpdateOptions{}) - c.Assert(err, IsNil) - c.Assert(as1.Spec, IsNil) - c.Assert(as3.Spec, NotNil) + c.Assert(err, check.IsNil) + c.Assert(as1.Spec, check.IsNil) + c.Assert(as3.Spec, check.NotNil) as4, err := cli.ActionSets(s.namespace).Get(ctx, name, emptyGetOptions) - c.Assert(err, IsNil) - c.Assert(as4, DeepEquals, as3) + c.Assert(err, check.IsNil) + c.Assert(as4, check.DeepEquals, as3) err = cli.ActionSets(s.namespace).Delete(ctx, name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = cli.ActionSets(s.namespace).Get(ctx, name, emptyGetOptions) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (s *ResourceSuite) TestBlueprintClient(c *C) { +func (s *ResourceSuite) TestBlueprintClient(c *check.C) { ctx := context.Background() config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = CreateCustomResources(ctx, config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) name := "testblueprint" cli, err := crclientv1alpha1.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) bp := &crv1alpha1.Blueprint{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, } bp1, err := cli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) - c.Assert(bp, NotNil) + c.Assert(err, check.IsNil) + c.Assert(bp, check.NotNil) bp2, err := cli.Blueprints(s.namespace).Get(ctx, name, emptyGetOptions) - c.Assert(err, IsNil) - c.Assert(bp1, DeepEquals, bp2) + c.Assert(err, check.IsNil) + 
c.Assert(bp1, check.DeepEquals, bp2) bp2.Actions = map[string]*crv1alpha1.BlueprintAction{ "backup": { Name: "takebackup", }, } bp3, err := cli.Blueprints(s.namespace).Update(ctx, bp2, metav1.UpdateOptions{}) - c.Assert(err, IsNil) - c.Assert(bp1.Actions, IsNil) - c.Assert(bp3.Actions, NotNil) + c.Assert(err, check.IsNil) + c.Assert(bp1.Actions, check.IsNil) + c.Assert(bp3.Actions, check.NotNil) bp4, err := cli.Blueprints(s.namespace).Get(ctx, name, emptyGetOptions) - c.Assert(err, IsNil) - c.Assert(bp4, DeepEquals, bp3) + c.Assert(err, check.IsNil) + c.Assert(bp4, check.DeepEquals, bp3) err = cli.Blueprints(s.namespace).Delete(ctx, name, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = cli.Blueprints(s.namespace).Get(ctx, name, emptyGetOptions) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } diff --git a/pkg/restic/restic_test.go b/pkg/restic/restic_test.go index c04fb4681a..a87b994ddd 100644 --- a/pkg/restic/restic_test.go +++ b/pkg/restic/restic_test.go @@ -17,7 +17,7 @@ package restic import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" @@ -28,15 +28,15 @@ import ( type ResticDataSuite struct{} -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } -var _ = Suite(&ResticDataSuite{}) +var _ = check.Suite(&ResticDataSuite{}) -func (s *ResticDataSuite) TestGetSnapshotIDFromTag(c *C) { +func (s *ResticDataSuite) TestGetSnapshotIDFromTag(c *check.C) { for _, tc := range []struct { log string expected string - checker Checker + checker check.Checker }{ { log: `[ @@ -80,15 +80,15 @@ func (s *ResticDataSuite) TestGetSnapshotIDFromTag(c *C) { expected: "7c0bfeb9", - checker: IsNil, + checker: check.IsNil, }, - {log: `null`, expected: "", checker: NotNil}, + {log: `null`, expected: "", checker: check.NotNil}, } { id, err := SnapshotIDFromSnapshotLog(tc.log) c.Assert(err, tc.checker) - c.Assert(id, Equals, tc.expected) + c.Assert(id, check.Equals, tc.expected) } } -func (s *ResticDataSuite) TestGetSnapshotID(c *C) { +func (s *ResticDataSuite) TestGetSnapshotID(c *check.C) { for _, tc := range []struct { log string expected string @@ -99,11 +99,11 @@ {"snapshot abc123\n saved", ""}, } { id := SnapshotIDFromBackupLog(tc.log) - c.Check(id, Equals, tc.expected, Commentf("Failed for log: %s", tc.log)) + c.Check(id, check.Equals, tc.expected, check.Commentf("Failed for log: %s", tc.log)) } } -func (s *ResticDataSuite) TestResticArgs(c *C) { +func (s *ResticDataSuite) TestResticArgs(c *check.C) { for _, tc := range []struct { profile *param.Profile repo string @@ -267,15 +267,15 @@ }, } { args, err := resticArgs(tc.profile, tc.repo, tc.password) - c.Assert(err, IsNil) - c.Assert(args, DeepEquals, tc.expected) + c.Assert(err, check.IsNil) + c.Assert(args, check.DeepEquals, tc.expected) } } -func (s *ResticDataSuite) TestResticArgsWithAWSRole(c *C) { +func (s *ResticDataSuite) TestResticArgsWithAWSRole(c *check.C) { for _, tc := range []struct { profile *param.Profile - output Checker + output check.Checker }{ { profile: &param.Profile{ @@ -295,7 +295,7 @@ }, }, }, - output: IsNil, + output: check.IsNil, }, { profile: &param.Profile{ @@ -315,7 +315,7 @@ }, }, }, - output: NotNil, + output: check.NotNil, }, } { _, err := resticArgs(tc.profile,
"repo", "my-secret") @@ -323,7 +323,7 @@ func (s *ResticDataSuite) TestResticArgsWithAWSRole(c *C) { } } -func (s *ResticDataSuite) TestGetSnapshotStatsFromStatsLog(c *C) { +func (s *ResticDataSuite) TestGetSnapshotStatsFromStatsLog(c *check.C) { for _, tc := range []struct { log string expectedfc string @@ -336,12 +336,12 @@ func (s *ResticDataSuite) TestGetSnapshotStatsFromStatsLog(c *C) { {log: " Total Size: 10.322 KiB", expectedfc: "", expectedsize: "10.322 KiB"}, } { _, fc, s := SnapshotStatsFromStatsLog(tc.log) - c.Assert(fc, Equals, tc.expectedfc) - c.Assert(s, Equals, tc.expectedsize) + c.Assert(fc, check.Equals, tc.expectedfc) + c.Assert(s, check.Equals, tc.expectedsize) } } -func (s *ResticDataSuite) TestGetSnapshotStatsModeFromStatsLog(c *C) { +func (s *ResticDataSuite) TestGetSnapshotStatsModeFromStatsLog(c *check.C) { for _, tc := range []struct { log string expected string @@ -354,11 +354,11 @@ func (s *ResticDataSuite) TestGetSnapshotStatsModeFromStatsLog(c *C) { {log: "sudhufehfuijbfjbruifhoiwhf", expected: ""}, } { mode := SnapshotStatsModeFromStatsLog(tc.log) - c.Assert(mode, Equals, tc.expected) + c.Assert(mode, check.Equals, tc.expected) } } -func (s *ResticDataSuite) TestIsPasswordIncorrect(c *C) { +func (s *ResticDataSuite) TestIsPasswordIncorrect(c *check.C) { for _, tc := range []struct { log string expected bool @@ -370,11 +370,11 @@ Is there a repository at the following location? s3:s3.amazonaws.com/abhdbhf/foodbar`, expected: false}, } { output := IsPasswordIncorrect(tc.log) - c.Assert(output, Equals, tc.expected) + c.Assert(output, check.Equals, tc.expected) } } -func (s *ResticDataSuite) TestDoesRepoExist(c *C) { +func (s *ResticDataSuite) TestDoesRepoExist(c *check.C) { for _, tc := range []struct { log string expected bool @@ -386,11 +386,11 @@ Is there a repository at the following location? 
s3:s3.amazonaws.com/abhdbhf/foodbar`, expected: true}, } { output := DoesRepoExist(tc.log) - c.Assert(output, Equals, tc.expected) + c.Assert(output, check.Equals, tc.expected) } } -func (s *ResticDataSuite) TestGetSnapshotStatsFromBackupLog(c *C) { +func (s *ResticDataSuite) TestGetSnapshotStatsFromBackupLog(c *check.C) { for _, tc := range []struct { log string expectedfc string @@ -450,13 +450,13 @@ func (s *ResticDataSuite) TestGetSnapshotStatsFromBackupLog(c *C) { } { c.Log(tc.log) fc, s, phy := SnapshotStatsFromBackupLog(tc.log) - c.Check(fc, Equals, tc.expectedfc) - c.Check(s, Equals, tc.expectedsize) - c.Check(phy, Equals, tc.expectedphy) + c.Check(fc, check.Equals, tc.expectedfc) + c.Check(s, check.Equals, tc.expectedsize) + c.Check(phy, check.Equals, tc.expectedphy) } } -func (s *ResticDataSuite) TestGetSpaceFreedFromPruneLog(c *C) { +func (s *ResticDataSuite) TestGetSpaceFreedFromPruneLog(c *check.C) { for _, tc := range []struct { log string expectedSpaceFreed string @@ -492,11 +492,11 @@ func (s *ResticDataSuite) TestGetSpaceFreedFromPruneLog(c *C) { {log: "Some unrelated log in the same line, will delete 100 packs and rewrite 100 packs, this frees 11.235 B", expectedSpaceFreed: ""}, } { spaceFreed := SpaceFreedFromPruneLog(tc.log) - c.Check(spaceFreed, Equals, tc.expectedSpaceFreed) + c.Check(spaceFreed, check.Equals, tc.expectedSpaceFreed) } } -func (s *ResticDataSuite) TestResticSizeStringParser(c *C) { +func (s *ResticDataSuite) TestResticSizeStringParser(c *check.C) { for _, tc := range []struct { input string expectedSizeB int64 @@ -527,6 +527,6 @@ func (s *ResticDataSuite) TestResticSizeStringParser(c *C) { {input: "GiB 1.1235", expectedSizeB: 0}, } { parsedSize := ParseResticSizeStringBytes(tc.input) - c.Check(parsedSize, Equals, tc.expectedSizeB) + c.Check(parsedSize, check.Equals, tc.expectedSizeB) } } diff --git a/pkg/secrets/aws_test.go b/pkg/secrets/aws_test.go index a355f06302..60de933417 100644 --- a/pkg/secrets/aws_test.go +++ b/pkg/secrets/aws_test.go @@ -18,7 +18,7 @@ import ( "context" "github.com/aws/aws-sdk-go/aws/credentials" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" "github.com/kanisterio/kanister/pkg/aws" @@ -27,13 +27,13 @@ import ( type AWSSecretSuite struct{} -var _ = Suite(&AWSSecretSuite{}) +var _ = check.Suite(&AWSSecretSuite{}) -func (s *AWSSecretSuite) TestExtractAWSCredentials(c *C) { +func (s *AWSSecretSuite) TestExtractAWSCredentials(c *check.C) { tcs := []struct { secret *corev1.Secret expected *credentials.Value - errChecker Checker + errChecker check.Checker }{ { secret: &corev1.Secret{ @@ -48,14 +48,14 @@ func (s *AWSSecretSuite) TestExtractAWSCredentials(c *C) { SecretAccessKey: "secret_key", ProviderName: credentials.StaticProviderName, }, - errChecker: IsNil, + errChecker: check.IsNil, }, { secret: &corev1.Secret{ Type: "Opaque", }, expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, { secret: &corev1.Secret{ @@ -65,7 +65,7 @@ func (s *AWSSecretSuite) TestExtractAWSCredentials(c *C) { }, }, expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, { secret: &corev1.Secret{ @@ -75,7 +75,7 @@ func (s *AWSSecretSuite) TestExtractAWSCredentials(c *C) { }, }, expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, { secret: &corev1.Secret{ @@ -87,20 +87,20 @@ func (s *AWSSecretSuite) TestExtractAWSCredentials(c *C) { }, }, expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, } for testNum, tc := range tcs { creds, err := ExtractAWSCredentials(context.Background(), tc.secret, aws.AssumeRoleDurationDefault) - c.Check(creds, DeepEquals, tc.expected, Commentf("test number: %d", testNum)) + c.Check(creds, check.DeepEquals, tc.expected, check.Commentf("test number: %d", testNum)) c.Check(err, tc.errChecker) } } -func (s *AWSSecretSuite) TestExtractAWSCredentialsWithSessionToken(c *C) { +func (s *AWSSecretSuite) TestExtractAWSCredentialsWithSessionToken(c *check.C) { for _, tc := range []struct { secret *corev1.Secret - output Checker + output check.Checker }{ { secret: &corev1.Secret{ @@ -111,7 +111,7 @@ func (s *AWSSecretSuite) TestExtractAWSCredentialsWithSessionToken(c *C) { ConfigRole: []byte(config.GetEnvOrSkip(c, "role")), }, }, - output: IsNil, + output: check.IsNil, }, { secret: &corev1.Secret{ @@ -122,7 +122,7 @@ func (s *AWSSecretSuite) TestExtractAWSCredentialsWithSessionToken(c *C) { ConfigRole: []byte("arn:aws:iam::000000000000:role/test-fake-role"), }, }, - output: NotNil, + output: check.NotNil, }, } { _, err := ExtractAWSCredentials(context.Background(), tc.secret, aws.AssumeRoleDurationDefault) diff --git a/pkg/secrets/azure_test.go b/pkg/secrets/azure_test.go index d3afc13517..2d69bf91b3 100644 --- a/pkg/secrets/azure_test.go +++ b/pkg/secrets/azure_test.go @@ -15,7 +15,7 @@ package secrets import ( - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" "github.com/kanisterio/kanister/pkg/objectstore" @@ -23,13 +23,13 @@ import ( type AzureSecretSuite struct{} -var _ = Suite(&AzureSecretSuite{}) +var _ = check.Suite(&AzureSecretSuite{}) -func (s *AzureSecretSuite) TestExtractAzureCredentials(c *C) { +func (s *AzureSecretSuite) TestExtractAzureCredentials(c *check.C) { for i, tc := range []struct { secret *corev1.Secret expected *objectstore.SecretAzure - errChecker Checker + errChecker check.Checker }{ { secret: &corev1.Secret{ @@ -45,7 +45,7 @@ func (s *AzureSecretSuite) TestExtractAzureCredentials(c *C) { StorageKey: "secret_key", EnvironmentName: "env", }, - errChecker: IsNil, + errChecker: check.IsNil, }, { // bad type secret: &corev1.Secret{ @@ -57,7 +57,7 @@ func (s *AzureSecretSuite) TestExtractAzureCredentials(c *C) { }, }, expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, { // missing field secret: &corev1.Secret{ @@ -68,7 +68,7 @@ func (s *AzureSecretSuite) TestExtractAzureCredentials(c *C) { }, }, expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, { // additional field secret: &corev1.Secret{ @@ -81,11 +81,11 @@ func (s *AzureSecretSuite) TestExtractAzureCredentials(c *C) { }, }, expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, } { azsecret, err := ExtractAzureCredentials(tc.secret) - c.Check(azsecret, DeepEquals, tc.expected, Commentf("test number: %d", i)) + c.Check(azsecret, check.DeepEquals, tc.expected, check.Commentf("test number: %d", i)) c.Check(err, tc.errChecker) } } diff --git a/pkg/secrets/gcp_test.go b/pkg/secrets/gcp_test.go index b8fbb85072..1fe66bc9d4 100644 --- a/pkg/secrets/gcp_test.go +++ b/pkg/secrets/gcp_test.go @@ -18,7 +18,7 @@ import ( "encoding/base64" "github.com/pkg/errors" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -27,14 +27,14 @@ import ( type GCPSecretSuite struct{} -var _ = Suite(&GCPSecretSuite{}) +var _ = check.Suite(&GCPSecretSuite{}) -func (s *GCPSecretSuite) TestValidateGCPCredentials(c *C) { +func (s *GCPSecretSuite) TestValidateGCPCredentials(c *check.C) { serviceAccountJSON := make([]byte, base64.StdEncoding.EncodedLen(len([]byte("service_account_json")))) base64.StdEncoding.Encode(serviceAccountJSON, []byte("service_account_json")) for i, tc := range []struct { secret *corev1.Secret - errChecker Checker + errChecker check.Checker expectedErr error }{ { @@ -49,7 +49,7 @@ func (s *GCPSecretSuite) TestValidateGCPCredentials(c *C) { GCPServiceAccountJSONKey: serviceAccountJSON, }, }, - errChecker: IsNil, + errChecker: check.IsNil, expectedErr: nil, }, { // Incompatible secret type @@ -64,7 +64,7 @@ func (s *GCPSecretSuite) TestValidateGCPCredentials(c *C) { GCPServiceAccountJSONKey: serviceAccountJSON, }, }, - errChecker: NotNil, + errChecker: check.NotNil, expectedErr: errors.Wrapf(secerrors.ErrValidate, secerrors.IncompatibleSecretTypeErrorMsg, GCPSecretType, "ns", "sec"), }, { // missing field - GCPServiceKey @@ -79,7 +79,7 @@ func (s *GCPSecretSuite) TestValidateGCPCredentials(c *C) { }, }, expectedErr: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, GCPServiceAccountJSONKey, "ns", "sec"), - errChecker: NotNil, + errChecker: check.NotNil, }, { // missing field - GCPProjectID secret: &corev1.Secret{ @@ -93,7 +93,7 @@ func (s *GCPSecretSuite) TestValidateGCPCredentials(c *C) { }, }, expectedErr: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, GCPProjectID, "ns", "sec"), - errChecker: NotNil, + errChecker: check.NotNil, }, { // secret is Empty secret: &corev1.Secret{ @@ -104,17 +104,17 @@ func (s *GCPSecretSuite) TestValidateGCPCredentials(c *C) { }, }, expectedErr: errors.Wrapf(secerrors.ErrValidate, secerrors.EmptySecretErrorMessage, "ns", "sec"), - errChecker: NotNil, + errChecker: check.NotNil, }, { // secret is nil secret: nil, expectedErr: errors.Wrapf(secerrors.ErrValidate, secerrors.NilSecretErrorMessage), - errChecker: NotNil, + errChecker: check.NotNil, }, } { err := ValidateGCPCredentials(tc.secret) if err != nil { - c.Check(err.Error(), Equals, tc.expectedErr.Error(), Commentf("test number: %d", i)) + c.Check(err.Error(), check.Equals, tc.expectedErr.Error(), check.Commentf("test number: %d", i)) } } } diff --git a/pkg/secrets/repositoryserver/aws_secrets_test.go b/pkg/secrets/repositoryserver/aws_secrets_test.go index 8cb1e9cc56..1b5914fb82 100644 --- a/pkg/secrets/repositoryserver/aws_secrets_test.go +++ b/pkg/secrets/repositoryserver/aws_secrets_test.go @@ -16,7 +16,7 @@ package repositoryserver import ( "github.com/pkg/errors" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,12 +25,12 @@ import ( type AWSSecretCredsSuite struct{} -var _ = Suite(&AWSSecretCredsSuite{}) +var _ = check.Suite(&AWSSecretCredsSuite{}) -func (s *AWSSecretCredsSuite) TestValidateRepoServerAWSCredentials(c *C) { +func (s *AWSSecretCredsSuite) TestValidateRepoServerAWSCredentials(c *check.C) { for i, tc := range []struct { secret Secret - errChecker Checker + errChecker check.Checker expectedError error }{ { // Valid AWS Secret @@ -45,7 +45,7 @@ func (s *AWSSecretCredsSuite) TestValidateRepoServerAWSCredentials(c *C) { RegionKey: []byte("region"), }, }), - errChecker: IsNil, + errChecker: check.IsNil, }, { // Missing required field - Region Key secret: NewAWSLocation(&corev1.Secret{ @@ -58,7 +58,7 @@ func (s *AWSSecretCredsSuite) TestValidateRepoServerAWSCredentials(c *C) { BucketKey: []byte("bucket"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, RegionKey, "ns", "sec"), }, { // Missing required field - Bucket Key @@ -72,7 +72,7 @@ func (s *AWSSecretCredsSuite) TestValidateRepoServerAWSCredentials(c *C) { RegionKey: []byte("region"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, BucketKey, "ns", "sec"), }, { // Empty Secret @@ -83,19 +83,19 @@ func (s *AWSSecretCredsSuite) TestValidateRepoServerAWSCredentials(c *C) { Namespace: "ns", }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.EmptySecretErrorMessage, "ns", "sec"), }, { // Nil Secret secret: NewAWSLocation(nil), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.NilSecretErrorMessage), }, } { err := tc.secret.Validate() c.Check(err, tc.errChecker) if err != nil { - c.Check(err.Error(), Equals, tc.expectedError.Error(), Commentf("test number: %d", i)) + c.Check(err.Error(), check.Equals, tc.expectedError.Error(), check.Commentf("test number: %d", i)) } } } diff --git a/pkg/secrets/repositoryserver/azure_secrets_test.go b/pkg/secrets/repositoryserver/azure_secrets_test.go index 985b06db4d..14925dc027 100644 --- a/pkg/secrets/repositoryserver/azure_secrets_test.go +++ b/pkg/secrets/repositoryserver/azure_secrets_test.go @@ -16,7 +16,7 @@ package repositoryserver import ( "github.com/pkg/errors" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,12 +25,12 @@ import ( type AzureSecretCredsSuite struct{} -var _ = Suite(&AzureSecretCredsSuite{}) +var _ = check.Suite(&AzureSecretCredsSuite{}) -func (s *AzureSecretCredsSuite) TestValidateRepoServerAzureCredentials(c *C) { +func (s *AzureSecretCredsSuite) TestValidateRepoServerAzureCredentials(c *check.C) { for i, tc := range []struct { secret Secret - errChecker Checker + errChecker check.Checker expectedError error }{ { // Valid Azure Secret @@ -45,7 +45,7 @@ func (s *AzureSecretCredsSuite) TestValidateRepoServerAzureCredentials(c *C) { RegionKey: []byte("region"), }, }), - errChecker: IsNil, + errChecker: check.IsNil, }, { // Missing required field - Bucket Key secret: NewAzureLocation(&corev1.Secret{ @@ -58,7 +58,7 @@ func (s *AzureSecretCredsSuite) TestValidateRepoServerAzureCredentials(c *C) { RegionKey: []byte("region"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, BucketKey, "ns", "sec"), }, { // Empty Secret @@ -69,12 +69,12 @@ func (s *AzureSecretCredsSuite) TestValidateRepoServerAzureCredentials(c *C) { Namespace: "ns", }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.EmptySecretErrorMessage, "ns", "sec"), }, { // Nil Secret secret: NewAzureLocation(nil), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.NilSecretErrorMessage), }, } { @@ -82,7 +82,7 @@ func (s *AzureSecretCredsSuite) TestValidateRepoServerAzureCredentials(c *C) { c.Check(err, tc.errChecker) if err != nil { - c.Check(err.Error(), Equals, tc.expectedError.Error(), Commentf("test number: %d", i)) + c.Check(err.Error(), check.Equals, tc.expectedError.Error(), check.Commentf("test number: %d", i)) } } } diff --git a/pkg/secrets/repositoryserver/gcp_secrets_test.go b/pkg/secrets/repositoryserver/gcp_secrets_test.go index 01dbe80f6d..078fcfef21 100644 --- a/pkg/secrets/repositoryserver/gcp_secrets_test.go +++ b/pkg/secrets/repositoryserver/gcp_secrets_test.go @@ -16,7 +16,7 @@ package repositoryserver import ( "github.com/pkg/errors" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,12 +25,12 @@ import ( type GCPSecretCredsSuite struct{} -var _ = Suite(&GCPSecretCredsSuite{}) +var _ = check.Suite(&GCPSecretCredsSuite{}) -func (s *GCPSecretCredsSuite) TestValidateRepoServerGCPCredentials(c *C) { +func (s *GCPSecretCredsSuite) TestValidateRepoServerGCPCredentials(c *check.C) { for i, tc := range []struct { secret Secret - errChecker Checker + errChecker check.Checker expectedError error }{ { // Valid GCP Secret @@ -45,7 +45,7 @@ func (s *GCPSecretCredsSuite) TestValidateRepoServerGCPCredentials(c *C) { RegionKey: []byte("region"), }, }), - errChecker: IsNil, + errChecker: check.IsNil, }, { // Missing required field - Bucket Key secret: NewGCPLocation(&corev1.Secret{ @@ -58,7 +58,7 @@ func (s *GCPSecretCredsSuite) TestValidateRepoServerGCPCredentials(c *C) { RegionKey: []byte("region"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, BucketKey, "ns", "sec"), }, { // Empty Secret @@ -69,19 +69,19 @@ func (s *GCPSecretCredsSuite) TestValidateRepoServerGCPCredentials(c *C) { Namespace: "ns", }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.EmptySecretErrorMessage, "ns", "sec"), }, { // Nil Secret secret: NewGCPLocation(nil), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.NilSecretErrorMessage), }, } { err := tc.secret.Validate() c.Check(err, tc.errChecker) if err != nil { - c.Check(err.Error(), Equals, tc.expectedError.Error(), Commentf("test number: %d", i)) + c.Check(err.Error(), check.Equals, tc.expectedError.Error(), check.Commentf("test number: %d", i)) } } } diff --git a/pkg/secrets/repositoryserver/repository_password_test.go b/pkg/secrets/repositoryserver/repository_password_test.go index d0c76107b4..e2a9c68d7e 100644 --- a/pkg/secrets/repositoryserver/repository_password_test.go +++ b/pkg/secrets/repositoryserver/repository_password_test.go @@ -16,7 +16,7 @@ package repositoryserver import ( "github.com/pkg/errors" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,12 +25,12 @@ import ( type RepositoryPasswordSecretSuite struct{} -var _ = Suite(&RepositoryPasswordSecretSuite{}) +var _ = check.Suite(&RepositoryPasswordSecretSuite{}) -func (s *GCPSecretCredsSuite) TestValidateRepositoryPassword(c *C) { +func (s *GCPSecretCredsSuite) TestValidateRepositoryPassword(c *check.C) { for i, tc := range []struct { secret Secret - errChecker Checker + errChecker check.Checker expectedError error }{ { // Valid Repository Password Secret @@ -44,7 +44,7 @@ func (s *GCPSecretCredsSuite) TestValidateRepositoryPassword(c *C) { RepoPasswordKey: []byte("repopassword"), }, }), - errChecker: IsNil, + errChecker: check.IsNil, }, { // Missing required field - Repo Password Key secret: NewRepoPassword(&corev1.Secret{ @@ -57,7 +57,7 @@ func (s *GCPSecretCredsSuite) TestValidateRepositoryPassword(c *C) { BucketKey: []byte("bucketkey"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, RepoPasswordKey, "ns", "sec"), }, { // Secret should contain only 1 key value pair @@ -72,7 +72,7 @@ func (s *GCPSecretCredsSuite) TestValidateRepositoryPassword(c *C) { RepoPasswordKey: []byte("repopassword"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.UnknownFieldErrorMsg, "ns", "sec"), }, { // Empty Secret @@ -83,19 +83,19 @@ func (s *GCPSecretCredsSuite) TestValidateRepositoryPassword(c *C) { Namespace: "ns", }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.EmptySecretErrorMessage, "ns", "sec"), }, { // Nil Secret secret: NewRepoPassword(nil), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.NilSecretErrorMessage), }, } { err := tc.secret.Validate() c.Check(err, tc.errChecker) if err != nil { - c.Check(err.Error(), Equals, tc.expectedError.Error(), Commentf("test number: %d", i)) + c.Check(err.Error(), check.Equals, tc.expectedError.Error(), check.Commentf("test number: %d", i)) } } } diff --git a/pkg/secrets/repositoryserver/repository_server_admin_credentials_test.go b/pkg/secrets/repositoryserver/repository_server_admin_credentials_test.go index ad4799453b..568ca4c8c4 100644 --- a/pkg/secrets/repositoryserver/repository_server_admin_credentials_test.go +++ b/pkg/secrets/repositoryserver/repository_server_admin_credentials_test.go @@ -16,7 +16,7 @@ package repositoryserver import ( "github.com/pkg/errors" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,12 +25,12 @@ import ( type ServerAdminCredentialsSecretSuite struct{} -var _ = Suite(&ServerAdminCredentialsSecretSuite{}) +var _ = check.Suite(&ServerAdminCredentialsSecretSuite{}) -func (s *GCPSecretCredsSuite) TestValidateRepositoryServerAdminCredentials(c *C) { +func (s *GCPSecretCredsSuite) TestValidateRepositoryServerAdminCredentials(c *check.C) { for i, tc := range []struct { secret Secret - errChecker Checker + errChecker check.Checker expectedError error }{ { // Valid Repository Server Admin Credentials Secret @@ -45,7 +45,7 @@ func (s *GCPSecretCredsSuite) TestValidateRepositoryServerAdminCredentials(c *C) AdminPasswordKey: []byte("adminpassword"), }, }), - errChecker: IsNil, + errChecker: check.IsNil, }, { // Missing required field - AdminUsernameKey secret: NewRepositoryServerAdminCredentials(&corev1.Secret{ @@ -59,7 +59,7 @@ func (s *GCPSecretCredsSuite) TestValidateRepositoryServerAdminCredentials(c *C) BucketKey: []byte("bucketkey"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, AdminUsernameKey, "ns", "sec"), }, { // Missing required field - AdminPasswordKey @@ -74,7 +74,7 @@ func (s *GCPSecretCredsSuite) TestValidateRepositoryServerAdminCredentials(c *C) BucketKey: []byte("bucketkey"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, AdminPasswordKey, "ns", "sec"), }, { // Secret should contain only 2 key value pairs @@ -90,7 +90,7 @@ func (s *GCPSecretCredsSuite) TestValidateRepositoryServerAdminCredentials(c *C) BucketKey: []byte("invalidkey"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.UnknownFieldErrorMsg, "ns", "sec"), }, { // Empty Secret @@ -101,19 +101,19 @@ func (s *GCPSecretCredsSuite) TestValidateRepositoryServerAdminCredentials(c *C) Namespace: "ns", }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.EmptySecretErrorMessage, "ns", "sec"), }, { // Nil Secret secret: NewRepositoryServerAdminCredentials(nil), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.NilSecretErrorMessage), }, } { err := tc.secret.Validate() c.Check(err, tc.errChecker) if err != nil { - c.Check(err.Error(), Equals, tc.expectedError.Error(), Commentf("test number: %d", i)) + c.Check(err.Error(), check.Equals, tc.expectedError.Error(), check.Commentf("test number: %d", i)) } } } diff --git a/pkg/secrets/repositoryserver/s3compliant_secrets_test.go b/pkg/secrets/repositoryserver/s3compliant_secrets_test.go index 29b14450d3..745dceedba 100644 --- a/pkg/secrets/repositoryserver/s3compliant_secrets_test.go +++ b/pkg/secrets/repositoryserver/s3compliant_secrets_test.go @@ -16,7 +16,7 @@ package repositoryserver import ( "github.com/pkg/errors" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,12 +25,12 @@ import ( type S3CompliantSecretTestSuite struct{} -var _ = Suite(&S3CompliantSecretTestSuite{}) +var _ = check.Suite(&S3CompliantSecretTestSuite{}) -func (s *S3CompliantSecretTestSuite) TestValidateRepoServerS3CompliantCredentials(c *C) { +func (s *S3CompliantSecretTestSuite) TestValidateRepoServerS3CompliantCredentials(c *check.C) { for i, tc := range []struct { secret Secret - errChecker Checker + errChecker check.Checker expectedError error }{ { // Valid S3 Compatible Secret @@ -46,7 +46,7 @@ func (s *S3CompliantSecretTestSuite) TestValidateRepoServerS3CompliantCredential EndpointKey: []byte("endpoint"), }, }), - errChecker: IsNil, + errChecker: check.IsNil, }, { // Missing required field - Bucket Key secret: NewS3CompliantLocation(&corev1.Secret{ @@ -60,7 +60,7 @@ func (s *S3CompliantSecretTestSuite) TestValidateRepoServerS3CompliantCredential EndpointKey: []byte("endpoint"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, BucketKey, "ns", "sec"), }, { // Missing required field - Region Key @@ -75,7 +75,7 @@ func (s *S3CompliantSecretTestSuite) TestValidateRepoServerS3CompliantCredential EndpointKey: []byte("endpoint"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, RegionKey, "ns", "sec"), }, { // Missing required field - Endpoint Key @@ -90,7 +90,7 @@ func (s *S3CompliantSecretTestSuite) TestValidateRepoServerS3CompliantCredential BucketKey: []byte("bucket"), }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, EndpointKey, "ns", "sec"), }, { // Empty Secret @@ -101,19 +101,19 @@ func (s *S3CompliantSecretTestSuite) TestValidateRepoServerS3CompliantCredential Namespace: "ns", }, }), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.EmptySecretErrorMessage, "ns", "sec"), }, { // Nil Secret secret: NewS3CompliantLocation(nil), - errChecker: NotNil, + errChecker: check.NotNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.NilSecretErrorMessage), }, } { err := tc.secret.Validate() c.Check(err, tc.errChecker) if err != nil { - c.Check(err.Error(), Equals, tc.expectedError.Error(), Commentf("test number: %d", i)) + c.Check(err.Error(), check.Equals, tc.expectedError.Error(), check.Commentf("test number: %d", i)) } } } diff --git a/pkg/secrets/repositoryserver/secrets_test.go b/pkg/secrets/repositoryserver/secrets_test.go index 8d2a5acdac..fb8f1bff63 100644 --- a/pkg/secrets/repositoryserver/secrets_test.go +++ b/pkg/secrets/repositoryserver/secrets_test.go @@ -16,7 +16,7 @@ package repositoryserver import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } diff --git a/pkg/secrets/secrets_test.go b/pkg/secrets/secrets_test.go index 55975c3948..27b27bb54e 100644 --- a/pkg/secrets/secrets_test.go +++ b/pkg/secrets/secrets_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/pkg/errors" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,17 +26,17 @@ import ( "github.com/kanisterio/kanister/pkg/secrets/repositoryserver" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type SecretUtilsSuite struct{} -var _ = Suite(&SecretUtilsSuite{}) +var _ = check.Suite(&SecretUtilsSuite{}) -func (s *SecretUtilsSuite) TestGetLocationSecret(c *C) { +func (s *SecretUtilsSuite) TestGetLocationSecret(c *check.C) { for i, tc := range []struct { secret *corev1.Secret - errChecker Checker - locationSecretChecker Checker + errChecker check.Checker + locationSecretChecker check.Checker expectedError error }{ { // Valid secret type @@ -46,8 +46,8 @@ func (s *SecretUtilsSuite) TestGetLocationSecret(c *C) { repositoryserver.TypeKey: []byte(repositoryserver.LocTypeGCS), }, }, - errChecker: IsNil, - locationSecretChecker: NotNil, + errChecker: check.IsNil, + locationSecretChecker: check.NotNil, expectedError: nil, }, { // Valid secret type @@ -57,8 +57,8 @@ func (s *SecretUtilsSuite) TestGetLocationSecret(c *C) { repositoryserver.TypeKey: []byte(repositoryserver.LocTypeAzure), }, }, - errChecker: IsNil, - locationSecretChecker: NotNil, + errChecker: check.IsNil, + locationSecretChecker: check.NotNil, expectedError: nil, }, { // Valid secret type @@ -68,8 +68,8 @@ func (s *SecretUtilsSuite) TestGetLocationSecret(c *C) { repositoryserver.TypeKey: []byte(repositoryserver.LocTypeS3), }, }, - errChecker: IsNil, - locationSecretChecker: NotNil, + errChecker: check.IsNil, + locationSecretChecker: check.NotNil, expectedError: nil, }, { // Valid secret type @@ -79,8 +79,8 @@ func (s *SecretUtilsSuite) TestGetLocationSecret(c *C) { repositoryserver.TypeKey: []byte(repositoryserver.LocTypeFilestore), }, }, - errChecker: IsNil, - locationSecretChecker: NotNil, + errChecker: check.IsNil, + locationSecretChecker: check.NotNil, expectedError: nil, }, { // Missing location type @@ -91,8 +91,8 @@ func (s *SecretUtilsSuite) TestGetLocationSecret(c *C) { Namespace: "ns", }, }, - errChecker: NotNil, - locationSecretChecker: IsNil, + errChecker: check.NotNil, + locationSecretChecker: check.IsNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.MissingRequiredFieldErrorMsg, repositoryserver.TypeKey, "ns", "sec"), }, { // Unsupported location type @@ -106,8 +106,8 @@ func (s *SecretUtilsSuite) TestGetLocationSecret(c *C) { repositoryserver.TypeKey: []byte("invalid"), }, }, - errChecker: NotNil, - locationSecretChecker: IsNil, + errChecker: check.NotNil, + locationSecretChecker: check.IsNil, expectedError: errors.Wrapf(secerrors.ErrValidate, secerrors.UnsupportedLocationTypeErrorMsg, "invalid", "ns", "sec"), }, } { @@ -115,7 +115,7 @@ func (s *SecretUtilsSuite) TestGetLocationSecret(c *C) { c.Check(err, tc.errChecker) c.Check(rsecret, tc.locationSecretChecker) if err != nil { - c.Check(err.Error(), Equals, tc.expectedError.Error(), Commentf("test number: %d", i)) + c.Check(err.Error(), check.Equals, tc.expectedError.Error(), check.Commentf("test number: %d", i)) } } } diff --git a/pkg/testing/e2e_test.go b/pkg/testing/e2e_test.go index 0fa85748ef..17ef519cd3 100644 --- a/pkg/testing/e2e_test.go +++ b/pkg/testing/e2e_test.go @@ -24,13 +24,14 @@ import ( "time" "github.com/pkg/errors" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" crclient "github.com/kanisterio/kanister/pkg/client/clientset/versioned/typed/cr/v1alpha1" + "github.com/kanisterio/kanister/pkg/consts" "github.com/kanisterio/kanister/pkg/controller" "github.com/kanisterio/kanister/pkg/function" "github.com/kanisterio/kanister/pkg/kube" @@ -46,16 +47,16 @@ type E2ESuite struct { cancel context.CancelFunc } -var _ = Suite(&E2ESuite{}) +var _ = check.Suite(&E2ESuite{}) -func (s *E2ESuite) SetUpSuite(c *C) { +func (s *E2ESuite) SetUpSuite(c *check.C) { // Instantiate Client SDKs cfg, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli, err = kubernetes.NewForConfig(cfg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.crCli, err = crclient.NewForConfig(cfg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Create a new test namespace ns := &corev1.Namespace{ @@ -64,47 +65,47 @@ func (s *E2ESuite) SetUpSuite(c *C) { }, } cns, err := s.cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.namespace = cns.GetName() // Start the controller ctx := context.Background() ctx, s.cancel = context.WithCancel(ctx) err = resource.CreateCustomResources(ctx, cfg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctlr := controller.New(cfg, nil) err = ctlr.StartWatch(ctx, s.namespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *E2ESuite) TearDownSuite(c *C) { +func (s *E2ESuite) TearDownSuite(c *check.C) { if s.namespace != "" { err := s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } if s.cancel != nil { s.cancel() } } -func (s *E2ESuite) TestKubeExec(c *C) { +func (s *E2ESuite) TestKubeExec(c *check.C) { ctx, can := context.WithTimeout(context.Background(), 60*time.Second) defer can() // Create a test Deployment d, err := s.cli.AppsV1().Deployments(s.namespace).Create(ctx, testutil.NewTestDeployment(1), metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnDeploymentReady(ctx, s.cli, s.namespace, d.GetName()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Create test Profile and secret sec := testutil.NewTestProfileSecret() sec, err = s.cli.CoreV1().Secrets(s.namespace).Create(ctx, sec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p := testutil.NewTestProfile(s.namespace, sec.GetName()) p, err = s.crCli.Profiles(s.namespace).Create(ctx, p, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Create a simple Blueprint bp := &crv1alpha1.Blueprint{ @@ -130,7 +131,7 @@ func (s *E2ESuite) TestKubeExec(c *C) { }, } bp, err = s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Create an ActionSet as := &crv1alpha1.ActionSet{ @@ -156,7 +157,7 @@ func (s *E2ESuite) TestKubeExec(c *C) { }, } as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Wait for the ActionSet to complete. 
err = poll.Wait(ctx, func(ctx context.Context) (bool, error) { @@ -171,19 +172,19 @@ func (s *E2ESuite) TestKubeExec(c *C) { } return false, nil }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) log.Print("Completed E2E TestKubeExec") } -func (s *E2ESuite) TestKubeTask(c *C) { +func (s *E2ESuite) TestKubeTask(c *check.C) { ctx, can := context.WithTimeout(context.Background(), 30*time.Second) defer can() // Create a test Deployment d, err := s.cli.AppsV1().Deployments(s.namespace).Create(ctx, testutil.NewTestDeployment(1), metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = kube.WaitOnDeploymentReady(ctx, s.cli, s.namespace, d.GetName()) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Create test Profile and secret sec := &corev1.Secret{ @@ -196,7 +197,7 @@ func (s *E2ESuite) TestKubeTask(c *C) { }, } sec, err = s.cli.CoreV1().Secrets(s.namespace).Create(ctx, sec, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) p := &crv1alpha1.Profile{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-profile-", @@ -217,7 +218,7 @@ func (s *E2ESuite) TestKubeTask(c *C) { }, } p, err = s.crCli.Profiles(s.namespace).Create(ctx, p, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Create a simple Blueprint bp := &crv1alpha1.Blueprint{ @@ -232,7 +233,7 @@ func (s *E2ESuite) TestKubeTask(c *C) { Func: function.KubeTaskFuncName, Name: "test-kube-task", Args: map[string]interface{}{ - "image": "ghcr.io/kanisterio/kanister-tools:0.110.0", + "image": consts.LatestKanisterToolsImage, "namespace": "{{ .Deployment.Namespace }}", "command": []string{"echo", "default specs"}, "podOverride": map[string]interface{}{ @@ -251,7 +252,7 @@ func (s *E2ESuite) TestKubeTask(c *C) { }, } bp, err = s.crCli.Blueprints(s.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Create an ActionSet as := &crv1alpha1.ActionSet{ @@ -280,7 +281,7 @@ func (s *E2ESuite) TestKubeTask(c *C) { }, } as, err = s.crCli.ActionSets(s.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Wait for the ActionSet to complete. err = poll.Wait(ctx, func(ctx context.Context) (bool, error) { @@ -295,13 +296,13 @@ func (s *E2ESuite) TestKubeTask(c *C) { } return false, nil }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) log.Print("Completed E2E TestKubeTask") } -func (s *E2ESuite) TestPodLabelsAndAnnotations(c *C) { +func (s *E2ESuite) TestPodLabelsAndAnnotations(c *check.C) { bp, err := s.crCli.Blueprints(s.namespace).Create(context.Background(), blueprintWithPodFunctions(), metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // 1. 
scenario where the labels/annotations are provided via actionset as well as blueprint asPodLabels := map[string]string{ @@ -312,32 +313,32 @@ func (s *E2ESuite) TestPodLabelsAndAnnotations(c *C) { } as := backupActionsetWihtPodLabelsAndAnnotations(s.namespace, bp.Name, asPodAnn, asPodLabels) asCreated, err := s.crCli.ActionSets(s.namespace).Create(context.Background(), as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForFunctionPodReady() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctx := context.Background() pods, err := s.cli.CoreV1().Pods("default").List(ctx, metav1.ListOptions{ LabelSelector: "createdBy=kanister", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyLabelsInFunctionPod(pods.Items[0].Labels, map[string]string{ "asLabKeyOne": "asLabValOne", "bpLabKeyOne": "bpLabValueOne", "labKey": "labValue", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyAnnotationsInFunctionPod(pods.Items[0].Annotations, map[string]string{ "asAnnKeyOne": "asAnnValOne", "bpAnnKeyOne": "bpAnnValueOne", "annKey": "annValue", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForActionSetComplete(asCreated.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // 2. scenario where labels/annotations are provided via actionset as well blueprint // and same key is present at both places. @@ -349,30 +350,30 @@ func (s *E2ESuite) TestPodLabelsAndAnnotations(c *C) { "labKey": "asLabValue", // this label is present in blueprint as well but with diff value (labValue) }) asCreatedOne, err := s.crCli.ActionSets(s.namespace).Create(context.Background(), asOne, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForFunctionPodReady() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pods, err = s.cli.CoreV1().Pods("default").List(ctx, metav1.ListOptions{ LabelSelector: "createdBy=kanister", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyLabelsInFunctionPod(pods.Items[0].Labels, map[string]string{ "asLabKeyOne": "asLabValOne", "bpLabKeyOne": "bpLabValueOne", "labKey": "labValue", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyAnnotationsInFunctionPod(pods.Items[0].Annotations, map[string]string{ "asAnnKeyOne": "asAnnValOne", "bpAnnKeyOne": "bpAnnValueOne", "annKey": "annValue", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForActionSetComplete(asCreatedOne.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // 3. 
scenario where labels/annotations are present at both places (actionset, blueprint) and no common key is present asTwo := backupActionsetWihtPodLabelsAndAnnotations(s.namespace, bp.Name, map[string]string{ @@ -383,65 +384,65 @@ func (s *E2ESuite) TestPodLabelsAndAnnotations(c *C) { "asLabKeyTwo": "asLabValTwo", }) asCreatedTwo, err := s.crCli.ActionSets(s.namespace).Create(context.Background(), asTwo, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForFunctionPodReady() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pods, err = s.cli.CoreV1().Pods("default").List(ctx, metav1.ListOptions{ LabelSelector: "createdBy=kanister", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyLabelsInFunctionPod(pods.Items[0].Labels, map[string]string{ "asLabKeyOne": "asLabValOne", "asLabKeyTwo": "asLabValTwo", "bpLabKeyOne": "bpLabValueOne", "labKey": "labValue", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyAnnotationsInFunctionPod(pods.Items[0].Annotations, map[string]string{ "asAnnKeyOne": "asAnnValOne", "asAnnKeyTwo": "asAnnValTwo", "bpAnnKeyOne": "bpAnnValueOne", "annKey": "annValue", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForActionSetComplete(asCreatedTwo.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // 4. scenario where labels/annotations are only provided via blueprint asThree := backupActionsetWihtPodLabelsAndAnnotations(s.namespace, bp.Name, nil, nil) asCreatedThree, err := s.crCli.ActionSets(s.namespace).Create(context.Background(), asThree, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForFunctionPodReady() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pods, err = s.cli.CoreV1().Pods("default").List(ctx, metav1.ListOptions{ LabelSelector: "createdBy=kanister", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyLabelsInFunctionPod(pods.Items[0].Labels, map[string]string{ "bpLabKeyOne": "bpLabValueOne", "labKey": "labValue", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyAnnotationsInFunctionPod(pods.Items[0].Annotations, map[string]string{ "bpAnnKeyOne": "bpAnnValueOne", "annKey": "annValue", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForActionSetComplete(asCreatedThree.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // 5. 
scenario where labels/annotations are only provided via actionset bpObj := blueprintWithPodFunctions() bpObj.Actions["backup"].Phases[0].Args["podLabels"] = map[string]string{} bpObj.Actions["backup"].Phases[0].Args["podAnnotations"] = map[string]string{} bp, err = s.crCli.Blueprints(s.namespace).Create(context.Background(), bpObj, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) asFour := backupActionsetWihtPodLabelsAndAnnotations(s.namespace, bp.Name, map[string]string{ "asAnnKeyOne": "asAnnValOne", @@ -450,32 +451,32 @@ func (s *E2ESuite) TestPodLabelsAndAnnotations(c *C) { "asLabKeyOne": "asLabValOne", }) asCreatedFour, err := s.crCli.ActionSets(s.namespace).Create(context.Background(), asFour, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForFunctionPodReady() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pods, err = s.cli.CoreV1().Pods("default").List(ctx, metav1.ListOptions{ LabelSelector: "createdBy=kanister", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyLabelsInFunctionPod(pods.Items[0].Labels, map[string]string{ "asLabKeyOne": "asLabValOne", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyAnnotationsInFunctionPod(pods.Items[0].Annotations, map[string]string{ "asAnnKeyOne": "asAnnValOne", "asAnnKeyTwo": "asAnnValTwo", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForActionSetComplete(asCreatedFour.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // test restore actionset bpObj = blueprintWithPodFunctions() bp, err = s.crCli.Blueprints(s.namespace).Create(context.Background(), bpObj, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) asRestore := restoreActionsetWithPodLabelsAndAnnotations(s.namespace, bp.Name, map[string]string{ "asAnnKeyOne": "asAnnValOne", @@ -484,37 +485,37 @@ func (s *E2ESuite) TestPodLabelsAndAnnotations(c *C) { "asLabKeyOne": "asLabValOne", }) asRestoreCreated, err := s.crCli.ActionSets(s.namespace).Create(context.Background(), asRestore, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForFunctionPodReady() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) pods, err = s.cli.CoreV1().Pods("default").List(ctx, metav1.ListOptions{ LabelSelector: "createdBy=kanister", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyLabelsInFunctionPod(pods.Items[0].Labels, map[string]string{ "bpLabKeyOne": "bpLabValueOne", "labKey": "labValue", "asLabKeyOne": "asLabValOne", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = verifyAnnotationsInFunctionPod(pods.Items[0].Annotations, map[string]string{ "asAnnKeyOne": "asAnnValOne", "asAnnKeyTwo": "asAnnValTwo", "bpAnnKeyOne": "bpAnnValueOne", "annKey": "annValue", }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = s.waitForActionSetComplete(asRestoreCreated.Name) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) log.Print("Completed E2E TestPodLabelsAndAnnotations") } func (s *E2ESuite) waitForActionSetComplete(asName string) error { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) defer cancel() return poll.Wait(ctx, func(ctx context.Context) (bool, error) { @@ -624,7 +625,7 @@ func blueprintWithPodFunctions() *crv1alpha1.Blueprint { Func: function.KubeTaskFuncName, Name: "backupphase-one", Args: map[string]interface{}{ - "image": "ghcr.io/kanisterio/kanister-tools:0.110.0", + 
"image": consts.LatestKanisterToolsImage, "namespace": "default", "command": []string{"sleep", "10"}, "podLabels": map[string]interface{}{ @@ -645,7 +646,7 @@ func blueprintWithPodFunctions() *crv1alpha1.Blueprint { Func: function.KubeTaskFuncName, Name: "restorephase-one", Args: map[string]interface{}{ - "image": "ghcr.io/kanisterio/kanister-tools:0.110.0", + "image": consts.LatestKanisterToolsImage, "namespace": "default", "command": []string{"sleep", "10"}, "podLabels": map[string]interface{}{ diff --git a/pkg/testing/helm/helm_test.go b/pkg/testing/helm/helm_test.go index aa96208291..029483fa75 100644 --- a/pkg/testing/helm/helm_test.go +++ b/pkg/testing/helm/helm_test.go @@ -19,7 +19,7 @@ import ( "fmt" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,10 +29,10 @@ import ( "github.com/kanisterio/kanister/pkg/kube" ) -var _ = Suite(&HelmTestSuite{}) +var _ = check.Suite(&HelmTestSuite{}) func Test(t *testing.T) { - TestingT(t) + check.TestingT(t) } const ( @@ -46,12 +46,12 @@ type HelmTestSuite struct { helmApp HelmApp } -func (h *HelmTestSuite) SetUpSuite(c *C) { +func (h *HelmTestSuite) SetUpSuite(c *check.C) { cfg, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := kubernetes.NewForConfig(cfg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) h.kubeClient = cli h.deploymentName = fmt.Sprintf("%s-%s", kanisterName, "kanister-operator") @@ -61,26 +61,26 @@ func (h *HelmTestSuite) SetUpSuite(c *C) { } kanisterApp, err := NewHelmApp(helmValues, kanisterName, "kanister/kanister-operator", kanisterName, "", false) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // add kanister repo err = kanisterApp.AddRepo(kanisterName, kanisterChartURL) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) h.helmApp = *kanisterApp } -func (h *HelmTestSuite) TestUpgrade(c *C) { +func (h *HelmTestSuite) TestUpgrade(c *check.C) { ctx := context.Background() // install released version of kanister c.Log("Installing kanister release") // TODO: Use manifests to test the helm charts _, err := h.helmApp.Install() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // wait for kanister deployment to be ready err = kube.WaitOnDeploymentReady(ctx, h.kubeClient, h.helmApp.namespace, h.deploymentName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) c.Log("Upgrading the kanister release with local chart and updated image") // upgrade the installed application @@ -88,29 +88,29 @@ func (h *HelmTestSuite) TestUpgrade(c *C) { "image.tag": "v9.99.9-dev", "bpValidatingWebhook.enabled": "false", } - c.Assert(h.helmApp.Upgrade("../../../helm/kanister-operator", updatedValues), IsNil) + c.Assert(h.helmApp.Upgrade("../../../helm/kanister-operator", updatedValues), check.IsNil) // wait for kanister deployment to be ready err = kube.WaitOnDeploymentReady(ctx, h.kubeClient, h.helmApp.namespace, h.deploymentName) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (h *HelmTestSuite) TestResourcesFromManifestAfterDryRunHelmInstall(c *C) { +func (h *HelmTestSuite) TestResourcesFromManifestAfterDryRunHelmInstall(c *check.C) { defer func() { h.helmApp.dryRun = false }() c.Log("Installing kanister release - Dry run") h.helmApp.dryRun = true out, err := h.helmApp.Install() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Fetch all resources resources := helm.ResourcesFromRenderedManifest(out, nil) - c.Assert(len(resources) > 0, Equals, true) + 
c.Assert(len(resources) > 0, check.Equals, true) } // TestSelectedDeploymentAttrFromKanisterHelmDryRunInstall test case does a dry run install of the `kanister` helm chart and validates // use cases for `nodeSelector` and `toleration` attributes in the helmValues.yaml. This function is specific to `deployment` resource. -func (h *HelmTestSuite) TestSelectedDeploymentAttrFromKanisterHelmDryRunInstall(c *C) { +func (h *HelmTestSuite) TestSelectedDeploymentAttrFromKanisterHelmDryRunInstall(c *check.C) { nodeSelector := map[string]string{ "selector-key": "selector-value", } @@ -179,30 +179,30 @@ func (h *HelmTestSuite) TestSelectedDeploymentAttrFromKanisterHelmDryRunInstall( }() // Installing kanister release from local kanister-operator - Dry run" testApp, err := NewHelmApp(tc.helmValues, kanisterName, "../../../helm/kanister-operator", kanisterName, "", true) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) out, err := testApp.Install() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) resources := helm.ResourcesFromRenderedManifest(out, func(kind helm.K8sObjectType) bool { return kind == helm.K8sObjectTypeDeployment }) - c.Assert(len(resources) > 0, Equals, true) + c.Assert(len(resources) > 0, check.Equals, true) // Take the deployment resources deployments, err := helm.K8sObjectsFromRenderedResources[*appsv1.Deployment](resources) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Use only the required deployment var obj = deployments[h.deploymentName] - c.Assert(obj, NotNil) + c.Assert(obj, check.NotNil) - c.Assert(obj.Spec.Template.Spec.NodeSelector, DeepEquals, tc.expectedNodeSelector) - c.Assert(obj.Spec.Template.Spec.Tolerations, DeepEquals, tc.expectedTolerations) + c.Assert(obj.Spec.Template.Spec.NodeSelector, check.DeepEquals, tc.expectedNodeSelector) + c.Assert(obj.Spec.Template.Spec.Tolerations, check.DeepEquals, tc.expectedTolerations) } } -func (h *HelmTestSuite) TearDownSuite(c *C) { +func (h *HelmTestSuite) TearDownSuite(c *check.C) { c.Log("Uninstalling chart") err := h.helmApp.Uninstall() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Uninstall doesn't delete namespace, delete namespace separately - c.Assert(h.kubeClient.CoreV1().Namespaces().Delete(context.Background(), h.helmApp.namespace, metav1.DeleteOptions{}), IsNil) + c.Assert(h.kubeClient.CoreV1().Namespaces().Delete(context.Background(), h.helmApp.namespace, metav1.DeleteOptions{}), check.IsNil) } diff --git a/pkg/testing/integration_register.go b/pkg/testing/integration_register.go index bad3fad0be..64ed1b555a 100644 --- a/pkg/testing/integration_register.go +++ b/pkg/testing/integration_register.go @@ -18,9 +18,9 @@ package testing import ( - "github.com/kanisterio/kanister/pkg/app" + "gopkg.in/check.v1" - . 
"gopkg.in/check.v1" + "github.com/kanisterio/kanister/pkg/app" ) // Register Applications to Integration Suite @@ -30,7 +30,7 @@ type PITRPostgreSQL struct { IntegrationSuite } -var _ = Suite(&PITRPostgreSQL{ +var _ = check.Suite(&PITRPostgreSQL{ IntegrationSuite{ name: "pitr-postgres", namespace: "pitr-postgres-test", @@ -45,7 +45,7 @@ type PostgreSQL struct { IntegrationSuite } -var _ = Suite(&PostgreSQL{ +var _ = check.Suite(&PostgreSQL{ IntegrationSuite{ name: "postgres", namespace: "postgres-test", @@ -60,7 +60,7 @@ type MySQL struct { IntegrationSuite } -var _ = Suite(&MySQL{ +var _ = check.Suite(&MySQL{ IntegrationSuite{ name: "mysql", namespace: "mysql-test", @@ -75,7 +75,7 @@ type CockroachDB struct { IntegrationSuite } -var _ = Suite(&CockroachDB{ +var _ = check.Suite(&CockroachDB{ IntegrationSuite{ name: "cockroachdb", namespace: "cockroachdb-test", @@ -90,7 +90,7 @@ type TimeLogCSI struct { IntegrationSuite } -var _ = Suite(&TimeLogCSI{ +var _ = check.Suite(&TimeLogCSI{ IntegrationSuite{ name: "time-logger", namespace: "time-log", @@ -105,7 +105,7 @@ type Maria struct { IntegrationSuite } -var _ = Suite(&Maria{ +var _ = check.Suite(&Maria{ IntegrationSuite{ name: "mariadb", namespace: "mariadb-test", @@ -120,7 +120,7 @@ type Elasticsearch struct { IntegrationSuite } -var _ = Suite(&Elasticsearch{ +var _ = check.Suite(&Elasticsearch{ IntegrationSuite{ name: "elasticsearch", namespace: "es-test", @@ -135,7 +135,7 @@ type MongoDB struct { IntegrationSuite } -var _ = Suite(&MongoDB{ +var _ = check.Suite(&MongoDB{ IntegrationSuite{ name: "mongo", namespace: "mongo-test", @@ -150,7 +150,7 @@ type Cassandra struct { IntegrationSuite } -var _ = Suite(&Cassandra{IntegrationSuite{ +var _ = check.Suite(&Cassandra{IntegrationSuite{ name: "cassandra", namespace: "cassandra-test", app: app.NewCassandraInstance("cassandra"), @@ -164,7 +164,7 @@ type Couchbase struct { IntegrationSuite } -var _ = Suite(&Couchbase{ +var _ = check.Suite(&Couchbase{ IntegrationSuite{ name: "couchbase", namespace: "couchbase-test", @@ -179,7 +179,7 @@ type RDSPostgreSQL struct { IntegrationSuite } -var _ = Suite(&RDSPostgreSQL{ +var _ = check.Suite(&RDSPostgreSQL{ IntegrationSuite{ name: "rds-postgres", namespace: "rds-postgres-test", @@ -194,7 +194,7 @@ type FoundationDB struct { IntegrationSuite } -var _ = Suite(&FoundationDB{ +var _ = check.Suite(&FoundationDB{ IntegrationSuite{ name: "foundationdb", namespace: "fdb-test", @@ -209,7 +209,7 @@ type RDSAuroraMySQL struct { IntegrationSuite } -var _ = Suite(&RDSAuroraMySQL{ +var _ = check.Suite(&RDSAuroraMySQL{ IntegrationSuite{ name: "rds-aurora-mysql", namespace: "rds-aurora-mysql-test", @@ -225,7 +225,7 @@ type RDSPostgreSQLDump struct { IntegrationSuite } -var _ = Suite(&RDSPostgreSQLDump{ +var _ = check.Suite(&RDSPostgreSQLDump{ IntegrationSuite{ name: "rds-postgres-dump", namespace: "rds-postgres-dump-test", @@ -241,7 +241,7 @@ type RDSPostgreSQLSnap struct { IntegrationSuite } -var _ = Suite(&RDSPostgreSQLSnap{ +var _ = check.Suite(&RDSPostgreSQLSnap{ IntegrationSuite{ name: "rds-postgres-snap", namespace: "rds-postgres-snap-test", @@ -256,7 +256,7 @@ type MSSQL struct { IntegrationSuite } -var _ = Suite(&MSSQL{ +var _ = check.Suite(&MSSQL{ IntegrationSuite{ name: "mssql", namespace: "mssql-test", @@ -272,7 +272,7 @@ type MysqlDBDepConfig struct { IntegrationSuite } -var _ = Suite(&MysqlDBDepConfig{ +var _ = check.Suite(&MysqlDBDepConfig{ IntegrationSuite{ name: "mysqldc", namespace: "mysqldc-test", @@ -287,7 +287,7 @@ type MongoDBDepConfig struct { 
IntegrationSuite } -var _ = Suite(&MongoDBDepConfig{ +var _ = check.Suite(&MongoDBDepConfig{ IntegrationSuite{ name: "mongodb", namespace: "mongodb-test", @@ -302,7 +302,7 @@ type PostgreSQLDepConfig struct { IntegrationSuite } -var _ = Suite(&PostgreSQLDepConfig{ +var _ = check.Suite(&PostgreSQLDepConfig{ IntegrationSuite{ name: "postgresdepconf", namespace: "postgresdepconf-test", @@ -318,7 +318,7 @@ type MysqlDBDepConfig4_4 struct { IntegrationSuite } -var _ = Suite(&MysqlDBDepConfig4_4{ +var _ = check.Suite(&MysqlDBDepConfig4_4{ IntegrationSuite{ name: "mysqldc", namespace: "mysqldc4-4-test", @@ -333,7 +333,7 @@ type MongoDBDepConfig4_4 struct { IntegrationSuite } -var _ = Suite(&MongoDBDepConfig4_4{ +var _ = check.Suite(&MongoDBDepConfig4_4{ IntegrationSuite{ name: "mongodb", namespace: "mongodb4-4-test", @@ -348,7 +348,7 @@ type PostgreSQLDepConfig4_4 struct { IntegrationSuite } -var _ = Suite(&PostgreSQLDepConfig4_4{ +var _ = check.Suite(&PostgreSQLDepConfig4_4{ IntegrationSuite{ name: "postgresdepconf", namespace: "postgresdepconf4-4-test", @@ -364,7 +364,7 @@ type MysqlDBDepConfig4_5 struct { IntegrationSuite } -var _ = Suite(&MysqlDBDepConfig4_5{ +var _ = check.Suite(&MysqlDBDepConfig4_5{ IntegrationSuite{ name: "mysqldc", namespace: "mysqldc4-5-test", @@ -379,7 +379,7 @@ type MongoDBDepConfig4_5 struct { IntegrationSuite } -var _ = Suite(&MongoDBDepConfig4_5{ +var _ = check.Suite(&MongoDBDepConfig4_5{ IntegrationSuite{ name: "mongodb", namespace: "mongodb4-5-test", @@ -394,7 +394,7 @@ type PostgreSQLDepConfig4_5 struct { IntegrationSuite } -var _ = Suite(&PostgreSQLDepConfig4_5{ +var _ = check.Suite(&PostgreSQLDepConfig4_5{ IntegrationSuite{ name: "postgresdepconf", namespace: "postgresdepconf4-5-test", @@ -409,7 +409,7 @@ type Kafka struct { IntegrationSuite } -var _ = Suite(&Kafka{ +var _ = check.Suite(&Kafka{ IntegrationSuite{ name: "kafka", namespace: "kafka-test", @@ -424,7 +424,7 @@ type MysqlDBDepConfig4_10 struct { IntegrationSuite } -var _ = Suite(&MysqlDBDepConfig4_10{ +var _ = check.Suite(&MysqlDBDepConfig4_10{ IntegrationSuite{ name: "mysqldc", namespace: "mysqldc4-10-test", @@ -439,7 +439,7 @@ type MongoDBDepConfig4_10 struct { IntegrationSuite } -var _ = Suite(&MongoDBDepConfig4_10{ +var _ = check.Suite(&MongoDBDepConfig4_10{ IntegrationSuite{ name: "mongodb", namespace: "mongodb4-10-test", @@ -454,7 +454,7 @@ type PostgreSQLDepConfig4_10 struct { IntegrationSuite } -var _ = Suite(&PostgreSQLDepConfig4_10{ +var _ = check.Suite(&PostgreSQLDepConfig4_10{ IntegrationSuite{ name: "postgresdepconf", namespace: "postgresdepconf4-10-test", @@ -469,7 +469,7 @@ type MysqlDBDepConfig4_11 struct { IntegrationSuite } -var _ = Suite(&MysqlDBDepConfig4_11{ +var _ = check.Suite(&MysqlDBDepConfig4_11{ IntegrationSuite{ name: "mysqldc", namespace: "mysqldc4-11-test", @@ -484,7 +484,7 @@ type PostgreSQLDepConfig4_11 struct { IntegrationSuite } -var _ = Suite(&PostgreSQLDepConfig4_11{ +var _ = check.Suite(&PostgreSQLDepConfig4_11{ IntegrationSuite{ name: "postgresdepconf", namespace: "postgresdepconf4-11-test", @@ -499,7 +499,7 @@ type MysqlDBDepConfig4_12 struct { IntegrationSuite } -var _ = Suite(&MysqlDBDepConfig4_12{ +var _ = check.Suite(&MysqlDBDepConfig4_12{ IntegrationSuite{ name: "mysqldc", namespace: "mysqldc4-12-test", @@ -514,7 +514,7 @@ type PostgreSQLDepConfig4_12 struct { IntegrationSuite } -var _ = Suite(&PostgreSQLDepConfig4_12{ +var _ = check.Suite(&PostgreSQLDepConfig4_12{ IntegrationSuite{ name: "postgresdepconf", namespace: "postgresdepconf4-12-test", @@ -529,7 
+529,7 @@ type MysqlDBDepConfig4_13 struct { IntegrationSuite } -var _ = Suite(&MysqlDBDepConfig4_13{ +var _ = check.Suite(&MysqlDBDepConfig4_13{ IntegrationSuite{ name: "mysqldc", namespace: "mysqldc4-13-test", @@ -544,7 +544,7 @@ type PostgreSQLDepConfig4_13 struct { IntegrationSuite } -var _ = Suite(&PostgreSQLDepConfig4_13{ +var _ = check.Suite(&PostgreSQLDepConfig4_13{ IntegrationSuite{ name: "postgresdepconf", namespace: "postgresdepconf4-13-test", @@ -559,7 +559,7 @@ type MysqlDBDepConfig4_14 struct { IntegrationSuite } -var _ = Suite(&MysqlDBDepConfig4_14{ +var _ = check.Suite(&MysqlDBDepConfig4_14{ IntegrationSuite{ name: "mysqldc", namespace: "mysqldc4-14-test", @@ -574,7 +574,7 @@ type PostgreSQLDepConfig4_14 struct { IntegrationSuite } -var _ = Suite(&PostgreSQLDepConfig4_14{ +var _ = check.Suite(&PostgreSQLDepConfig4_14{ IntegrationSuite{ name: "postgresdepconf", namespace: "postgresdepconf4-14-test", diff --git a/pkg/testing/integration_test.go b/pkg/testing/integration_test.go index 4c4123fc39..80bce4d248 100644 --- a/pkg/testing/integration_test.go +++ b/pkg/testing/integration_test.go @@ -24,7 +24,7 @@ import ( "time" "github.com/pkg/errors" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,19 +36,20 @@ import ( crclient "github.com/kanisterio/kanister/pkg/client/clientset/versioned/typed/cr/v1alpha1" "github.com/kanisterio/kanister/pkg/controller" "github.com/kanisterio/kanister/pkg/field" - _ "github.com/kanisterio/kanister/pkg/function" "github.com/kanisterio/kanister/pkg/kanctl" "github.com/kanisterio/kanister/pkg/kube" "github.com/kanisterio/kanister/pkg/log" "github.com/kanisterio/kanister/pkg/poll" "github.com/kanisterio/kanister/pkg/resource" "github.com/kanisterio/kanister/pkg/testutil" + + _ "github.com/kanisterio/kanister/pkg/function" ) // Hook up gocheck into the "go test" runner for integration builds func Test(t *test.T) { integrationSetup(t) - TestingT(t) + check.TestingT(t) integrationCleanup(t) } @@ -177,17 +178,17 @@ func newSecretProfile() *secretProfile { } } -func (s *IntegrationSuite) SetUpSuite(c *C) { +func (s *IntegrationSuite) SetUpSuite(c *check.C) { ctx := context.Background() _, s.cancel = context.WithCancel(ctx) // Instantiate Client SDKs cfg, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.cli, err = kubernetes.NewForConfig(cfg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.crCli, err = crclient.NewForConfig(cfg) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } // TestRun executes e2e workflow on the app @@ -198,7 +199,7 @@ func (s *IntegrationSuite) SetUpSuite(c *C) { // 5. Delete DB data // 6. Restore data from backup // 7. 
Uninstall DB app -func (s *IntegrationSuite) TestRun(c *C) { +func (s *IntegrationSuite) TestRun(c *check.C) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -215,7 +216,7 @@ func (s *IntegrationSuite) TestRun(c *C) { // Create namespace err = createNamespace(s.cli, s.namespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Create profile if s.profile == nil { @@ -227,18 +228,18 @@ func (s *IntegrationSuite) TestRun(c *C) { // Install db err = s.app.Install(ctx, s.namespace) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Check if ready ok, err := s.app.IsReady(ctx) - c.Assert(err, IsNil) - c.Assert(ok, Equals, true) + c.Assert(err, check.IsNil) + c.Assert(ok, check.Equals, true) // Create blueprint bp := s.bp.Blueprint() - c.Assert(bp, NotNil) + c.Assert(bp, check.NotNil) _, err = s.crCli.Blueprints(kontroller.namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) var configMaps, secrets map[string]crv1alpha1.ObjectReference testEntries := 3 @@ -246,22 +247,22 @@ func (s *IntegrationSuite) TestRun(c *C) { if a, ok := s.app.(app.DatabaseApp); ok { // wait for application to be actually ready err = pingAppAndWait(ctx, a) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = a.Reset(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = a.Initialize(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // Add few entries for i := 0; i < testEntries; i++ { - c.Assert(a.Insert(ctx), IsNil) + c.Assert(a.Insert(ctx), check.IsNil) } count, err := a.Count(ctx) - c.Assert(err, IsNil) - c.Assert(count, Equals, testEntries) + c.Assert(err, check.IsNil) + c.Assert(count, check.Equals, testEntries) } // Get Secret and ConfigMap object references @@ -277,7 +278,7 @@ func (s *IntegrationSuite) TestRun(c *C) { as := newActionSet(bp.GetName(), profileName, kontroller.namespace, s.app.Object(), configMaps, secrets) // Take backup backup := s.createActionset(ctx, c, as, "backup", nil) - c.Assert(len(backup), Not(Equals), 0) + c.Assert(len(backup), check.Not(check.Equals), 0) // Save timestamp for PITR var restoreOptions map[string]string @@ -290,35 +291,35 @@ func (s *IntegrationSuite) TestRun(c *C) { // Add few more entries with timestamp > pitr time.Sleep(time.Second) if a, ok := s.app.(app.DatabaseApp); ok { - c.Assert(a.Insert(ctx), IsNil) - c.Assert(a.Insert(ctx), IsNil) + c.Assert(a.Insert(ctx), check.IsNil) + c.Assert(a.Insert(ctx), check.IsNil) count, err := a.Count(ctx) - c.Assert(err, IsNil) - c.Assert(count, Equals, testEntries+2) + c.Assert(err, check.IsNil) + c.Assert(count, check.Equals, testEntries+2) } } // Reset DB if a, ok := s.app.(app.DatabaseApp); ok { err = a.Reset(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } // Restore backup pas, err := s.crCli.ActionSets(kontroller.namespace).Get(ctx, backup, metav1.GetOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) s.createActionset(ctx, c, pas, "restore", restoreOptions) // Verify data if a, ok := s.app.(app.DatabaseApp); ok { // wait for application to be actually ready err = pingAppAndWait(ctx, a) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) count, err := a.Count(ctx) - c.Assert(err, IsNil) - c.Assert(count, Equals, testEntries) + c.Assert(err, check.IsNil) + c.Assert(count, check.Equals, testEntries) } // Delete snapshots @@ -348,9 +349,9 @@ func newActionSet(bpName, profile, profileNs string, object crv1alpha1.ObjectRef } } -func (s *IntegrationSuite) createProfile(c *C, ctx context.Context) string { 
+func (s *IntegrationSuite) createProfile(c *check.C, ctx context.Context) string { secret, err := s.cli.CoreV1().Secrets(kontroller.namespace).Create(ctx, s.profile.secret, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // set secret ref in profile s.profile.profile.Credential.KeyPair.Secret = crv1alpha1.ObjectReference{ @@ -358,12 +359,12 @@ func (s *IntegrationSuite) createProfile(c *C, ctx context.Context) string { Namespace: secret.GetNamespace(), } profile, err := s.crCli.Profiles(kontroller.namespace).Create(ctx, s.profile.profile, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) return profile.GetName() } -func validateBlueprint(c *C, bp crv1alpha1.Blueprint, configMaps, secrets map[string]crv1alpha1.ObjectReference) { +func validateBlueprint(c *check.C, bp crv1alpha1.Blueprint, configMaps, secrets map[string]crv1alpha1.ObjectReference) { for _, action := range bp.Actions { // Validate BP action ConfigMapNames with the app.ConfigMaps references for _, bpc := range action.ConfigMapNames { @@ -373,7 +374,7 @@ func validateBlueprint(c *C, bp crv1alpha1.Blueprint, configMaps, secrets map[st validConfig = true } } - c.Assert(validConfig, Equals, true) + c.Assert(validConfig, check.Equals, true) } // Validate BP action SecretNames with the app.Secrets reference for _, bps := range action.SecretNames { @@ -383,22 +384,22 @@ func validateBlueprint(c *C, bp crv1alpha1.Blueprint, configMaps, secrets map[st validSecret = true } } - c.Assert(validSecret, Equals, true) + c.Assert(validSecret, check.Equals, true) } } } // createActionset creates and wait for actionset to complete -func (s *IntegrationSuite) createActionset(ctx context.Context, c *C, as *crv1alpha1.ActionSet, action string, options map[string]string) string { +func (s *IntegrationSuite) createActionset(ctx context.Context, c *check.C, as *crv1alpha1.ActionSet, action string, options map[string]string) string { var err error switch action { case "backup": as.Spec.Actions[0].Options = options as, err = s.crCli.ActionSets(kontroller.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) case "restore", "delete": as, err = restoreActionSetSpecs(as, action) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) as.Spec.Actions[0].Options = options if action == "delete" { // object of delete is always namespace of actionset @@ -412,7 +413,7 @@ func (s *IntegrationSuite) createActionset(ctx context.Context, c *C, as *crv1al } } as, err = s.crCli.ActionSets(kontroller.namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) default: c.Errorf("Invalid action %s while creating ActionSet", action) } @@ -430,7 +431,7 @@ func (s *IntegrationSuite) createActionset(ctx context.Context, c *C, as *crv1al } return false, nil }) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) return as.GetName() } @@ -457,20 +458,20 @@ func createNamespace(cli kubernetes.Interface, name string) error { return nil } -func (s *IntegrationSuite) TearDownSuite(c *C) { +func (s *IntegrationSuite) TearDownSuite(c *check.C) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Uninstall app if !s.skip { err := s.app.Uninstall(ctx) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } // Uninstall implementation of the apps doesn't delete namespace // Delete the namespace separately err := s.cli.CoreV1().Namespaces().Delete(ctx, s.namespace, metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, 
check.IsNil) } func pingAppAndWait(ctx context.Context, a app.DatabaseApp) error { diff --git a/pkg/testing/testing_test.go b/pkg/testing/testing_test.go index d9ac5897e4..874cddc46f 100644 --- a/pkg/testing/testing_test.go +++ b/pkg/testing/testing_test.go @@ -20,10 +20,10 @@ package testing import ( test "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner (non-integration builds) func Test(t *test.T) { - TestingT(t) + check.TestingT(t) } diff --git a/pkg/testutil/func_test.go b/pkg/testutil/func_test.go index f63dcca7a1..aa41ae175c 100644 --- a/pkg/testutil/func_test.go +++ b/pkg/testutil/func_test.go @@ -18,7 +18,7 @@ import ( "context" "strings" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/param" ) @@ -26,29 +26,29 @@ import ( type FuncSuite struct { } -var _ = Suite(&FuncSuite{}) +var _ = check.Suite(&FuncSuite{}) -func (s *FuncSuite) SetUpSuite(c *C) { +func (s *FuncSuite) SetUpSuite(c *check.C) { } -func (s *FuncSuite) TearDownSuite(c *C) { +func (s *FuncSuite) TearDownSuite(c *check.C) { } -func (s *FuncSuite) TestFailFunc(c *C) { +func (s *FuncSuite) TestFailFunc(c *check.C) { ctx := context.Background() go func() { _, err := failFunc(ctx, param.TemplateParams{}, nil) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) }() - c.Assert(FailFuncError().Error(), Equals, "Kanister function failed") + c.Assert(FailFuncError().Error(), check.Equals, "Kanister function failed") } -func (s *FuncSuite) TestWaitFunc(c *C) { +func (s *FuncSuite) TestWaitFunc(c *check.C) { ctx := context.Background() done := make(chan bool) go func() { _, err := waitFunc(ctx, param.TemplateParams{}, nil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) close(done) }() select { @@ -60,43 +60,43 @@ func (s *FuncSuite) TestWaitFunc(c *C) { <-done } -func (s *FuncSuite) TestArgsFunc(c *C) { +func (s *FuncSuite) TestArgsFunc(c *check.C) { ctx := context.Background() args := map[string]interface{}{"arg1": []string{"foo", "bar"}} go func() { _, err := argsFunc(ctx, param.TemplateParams{}, args) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() - c.Assert(ArgFuncArgs(), DeepEquals, args) + c.Assert(ArgFuncArgs(), check.DeepEquals, args) } -func (s *FuncSuite) TestOutputFunc(c *C) { +func (s *FuncSuite) TestOutputFunc(c *check.C) { ctx := context.Background() args := map[string]interface{}{"arg1": []string{"foo", "bar"}} go func() { output, err := outputFunc(ctx, param.TemplateParams{}, args) - c.Assert(err, IsNil) - c.Assert(output, DeepEquals, args) + c.Assert(err, check.IsNil) + c.Assert(output, check.DeepEquals, args) }() - c.Assert(OutputFuncOut(), DeepEquals, args) + c.Assert(OutputFuncOut(), check.DeepEquals, args) } -func (s *FuncSuite) TestCancelFunc(c *C) { +func (s *FuncSuite) TestCancelFunc(c *check.C) { ctx, cancel := context.WithCancel(context.Background()) done := make(chan bool) go func() { _, err := cancelFunc(ctx, param.TemplateParams{}, nil) - c.Assert(err, NotNil) - c.Assert(strings.Contains(err.Error(), "context canceled"), Equals, true) + c.Assert(err, check.NotNil) + c.Assert(strings.Contains(err.Error(), "context canceled"), check.Equals, true) close(done) }() - c.Assert(CancelFuncStarted(), NotNil) + c.Assert(CancelFuncStarted(), check.NotNil) select { case <-done: c.FailNow() default: } cancel() - c.Assert(CancelFuncOut().Error(), DeepEquals, "context canceled") + c.Assert(CancelFuncOut().Error(), check.DeepEquals, "context canceled") <-done } diff --git 
a/pkg/testutil/mockblockstorage/mockblockstorage_test.go b/pkg/testutil/mockblockstorage/mockblockstorage_test.go index e7724057b6..167c349519 100644 --- a/pkg/testutil/mockblockstorage/mockblockstorage_test.go +++ b/pkg/testutil/mockblockstorage/mockblockstorage_test.go @@ -17,19 +17,19 @@ package mockblockstorage import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" "github.com/kanisterio/kanister/pkg/blockstorage" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type MockSuite struct{} -var _ = Suite(&MockSuite{}) +var _ = check.Suite(&MockSuite{}) -func (s *MockSuite) TestMockStorage(c *C) { +func (s *MockSuite) TestMockStorage(c *check.C) { mock, err := Get(blockstorage.TypeEBS) - c.Assert(err, IsNil) - c.Assert(mock.Type(), Equals, blockstorage.TypeEBS) + c.Assert(err, check.IsNil) + c.Assert(mock.Type(), check.Equals, blockstorage.TypeEBS) } diff --git a/pkg/testutil/testutil_test.go b/pkg/testutil/testutil_test.go index 95ec6eceb4..2bdf040012 100644 --- a/pkg/testutil/testutil_test.go +++ b/pkg/testutil/testutil_test.go @@ -18,7 +18,7 @@ import ( "context" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -28,43 +28,43 @@ import ( ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type TestUtilSuite struct { } -var _ = Suite(&TestUtilSuite{}) +var _ = check.Suite(&TestUtilSuite{}) -func (s *TestUtilSuite) TestDeployment(c *C) { +func (s *TestUtilSuite) TestDeployment(c *check.C) { config, err := kube.LoadConfig() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) cli, err := kubernetes.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) crCli, err := crclientv1alpha1.NewForConfig(config) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) ctx := context.Background() ns := NewTestNamespace() ns, err = cli.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err := cli.CoreV1().Namespaces().Delete(context.TODO(), ns.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() d := NewTestDeployment(1) d, err = cli.AppsV1().Deployments(ns.GetName()).Create(ctx, d, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err = cli.AppsV1().Deployments(ns.GetName()).Delete(context.TODO(), d.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() ss := NewTestStatefulSet(1) ss, err = cli.AppsV1().StatefulSets(ns.GetName()).Create(ctx, ss, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err := cli.AppsV1().StatefulSets(ns.GetName()).Delete(context.TODO(), ss.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() for _, po := range []struct { @@ -85,26 +85,26 @@ func (s *TestUtilSuite) TestDeployment(c *C) { cm := NewTestConfigMap() cm, err = cli.CoreV1().ConfigMaps(ns.GetName()).Create(ctx, cm, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = cli.CoreV1().ConfigMaps(ns.GetName()).Delete(context.TODO(), cm.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func testCRs(c *C, ctx context.Context, cli crclientv1alpha1.CrV1alpha1Interface, namespace, poKind, poName string) { +func testCRs(c *check.C, ctx context.Context, cli 
crclientv1alpha1.CrV1alpha1Interface, namespace, poKind, poName string) { var err error bp := NewTestBlueprint(poKind, FailFuncName) bp = BlueprintWithConfigMap(bp) bp, err = cli.Blueprints(namespace).Create(ctx, bp, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer func() { err := cli.Blueprints(namespace).Delete(context.TODO(), bp.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) }() as := NewTestActionSet(namespace, bp.GetName(), poKind, poName, namespace, kanister.DefaultVersion, actionName) as = ActionSetWithConfigMap(as, "") as, err = cli.ActionSets(namespace).Create(ctx, as, metav1.CreateOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = cli.ActionSets(namespace).Delete(context.TODO(), as.GetName(), metav1.DeleteOptions{}) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } diff --git a/pkg/tools/grype_report_parser_tool_test.go b/pkg/tools/grype_report_parser_tool_test.go index 7b8bb84e10..4d45e7827b 100644 --- a/pkg/tools/grype_report_parser_tool_test.go +++ b/pkg/tools/grype_report_parser_tool_test.go @@ -3,52 +3,52 @@ package main import ( "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) type VulnerabilityParserSuite struct{} // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } -var _ = Suite(&VulnerabilityParserSuite{}) +var _ = check.Suite(&VulnerabilityParserSuite{}) -func (v *VulnerabilityParserSuite) TestNonExistentResult(c *C) { +func (v *VulnerabilityParserSuite) TestNonExistentResult(c *check.C) { severityLevels := []string{"High", "Critical"} matchingVulnerabilities, err := parseVulerabilitiesReport("testdata/result_non_existent.json", severityLevels) - c.Assert(len(matchingVulnerabilities), Equals, 0) - c.Assert(err, NotNil) + c.Assert(len(matchingVulnerabilities), check.Equals, 0) + c.Assert(err, check.NotNil) } -func (v *VulnerabilityParserSuite) TestInvalidJson(c *C) { +func (v *VulnerabilityParserSuite) TestInvalidJson(c *check.C) { severityLevels := []string{"High", "Critical"} matchingVulnerabilities, err := parseVulerabilitiesReport("testdata/results_invalid.json", severityLevels) - c.Assert(len(matchingVulnerabilities), Equals, 0) - c.Assert(err, NotNil) + c.Assert(len(matchingVulnerabilities), check.Equals, 0) + c.Assert(err, check.NotNil) } -func (v *VulnerabilityParserSuite) TestValidJsonWithZeroVulnerabilities(c *C) { +func (v *VulnerabilityParserSuite) TestValidJsonWithZeroVulnerabilities(c *check.C) { severityLevels := []string{"High", "Critical"} matchingVulnerabilities, err := parseVulerabilitiesReport("testdata/results_valid_no_matches.json", severityLevels) - c.Assert(len(matchingVulnerabilities), Equals, 0) - c.Assert(err, IsNil) + c.Assert(len(matchingVulnerabilities), check.Equals, 0) + c.Assert(err, check.IsNil) } -func (v *VulnerabilityParserSuite) TestValidJsonForLowVulerabilities(c *C) { +func (v *VulnerabilityParserSuite) TestValidJsonForLowVulerabilities(c *check.C) { severityLevels := []string{"Low", "Medium"} matchingVulnerabilities, err := parseVulerabilitiesReport("testdata/results_valid.json", severityLevels) - c.Assert(len(matchingVulnerabilities), Equals, 0) - c.Assert(err, IsNil) + c.Assert(len(matchingVulnerabilities), check.Equals, 0) + c.Assert(err, check.IsNil) } -func (v *VulnerabilityParserSuite) TestValidJsonForMatchingVulerabilities(c *C) { +func (v *VulnerabilityParserSuite) TestValidJsonForMatchingVulerabilities(c *check.C) { severityLevels := 
[]string{"High", "Critical"} expectedIds := []string{"CVE-2016-10228", "CVE-2016-10229"} matchingVulnerabilities, err := parseVulerabilitiesReport("testdata/results_valid.json", severityLevels) - c.Assert(len(matchingVulnerabilities), Equals, 2) - c.Assert(err, IsNil) + c.Assert(len(matchingVulnerabilities), check.Equals, 2) + c.Assert(err, check.IsNil) for index, vulnerability := range matchingVulnerabilities { - c.Assert(vulnerability.Vulnerabilities.ID, Equals, expectedIds[index]) - c.Assert(vulnerability.Vulnerabilities.Severity, Equals, severityLevels[index]) + c.Assert(vulnerability.Vulnerabilities.ID, check.Equals, expectedIds[index]) + c.Assert(vulnerability.Vulnerabilities.Severity, check.Equals, severityLevels[index]) } } diff --git a/pkg/validate/error_test.go b/pkg/validate/error_test.go index 7de4ab1c8d..05b9cba995 100644 --- a/pkg/validate/error_test.go +++ b/pkg/validate/error_test.go @@ -18,14 +18,14 @@ import ( "fmt" "github.com/pkg/errors" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) type ErrorSuite struct{} -var _ = Suite(&ErrorSuite{}) +var _ = check.Suite(&ErrorSuite{}) -func (s *ErrorSuite) TestIsError(c *C) { +func (s *ErrorSuite) TestIsError(c *check.C) { for _, tc := range []struct { err error is bool @@ -63,6 +63,6 @@ func (s *ErrorSuite) TestIsError(c *C) { is: false, }, } { - c.Check(IsError(tc.err), Equals, tc.is) + c.Check(IsError(tc.err), check.Equals, tc.is) } } diff --git a/pkg/validate/validate_test.go b/pkg/validate/validate_test.go index b6f7e086d2..9d2b8c3e5b 100644 --- a/pkg/validate/validate_test.go +++ b/pkg/validate/validate_test.go @@ -18,7 +18,7 @@ import ( "context" "testing" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -31,33 +31,33 @@ import ( ) // Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type ValidateSuite struct{} -var _ = Suite(&ValidateSuite{}) +var _ = check.Suite(&ValidateSuite{}) -func (s *ValidateSuite) TestActionSet(c *C) { +func (s *ValidateSuite) TestActionSet(c *check.C) { for _, tc := range []struct { as *crv1alpha1.ActionSet - checker Checker + checker check.Checker }{ { as: &crv1alpha1.ActionSet{}, - checker: NotNil, + checker: check.NotNil, }, { as: &crv1alpha1.ActionSet{ Spec: &crv1alpha1.ActionSetSpec{}, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSet{ ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}, Spec: &crv1alpha1.ActionSetSpec{}, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSet{ @@ -78,7 +78,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSet{ @@ -99,7 +99,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSet{ @@ -120,7 +120,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSet{ @@ -141,14 +141,14 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSet{ Spec: &crv1alpha1.ActionSetSpec{}, Status: &crv1alpha1.ActionSetStatus{}, }, - checker: NotNil, + checker: check.NotNil, }, { as: &crv1alpha1.ActionSet{ @@ -157,7 +157,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { State: crv1alpha1.StatePending, }, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSet{ @@ -175,7 +175,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { State: crv1alpha1.StatePending, }, }, - checker: NotNil, + checker: check.NotNil, }, { as: &crv1alpha1.ActionSet{ @@ -196,7 +196,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSet{ @@ -217,7 +217,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, // NamespaceKind { @@ -234,7 +234,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, // StatefulSetKind { @@ -251,7 +251,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, // DeploymentKind { @@ -268,7 +268,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, // PVCKind { @@ -285,7 +285,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, // Generic K8s resource (apiversion, resource missing) { @@ -302,7 +302,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: NotNil, + checker: check.NotNil, }, // Generic K8s resource { @@ -320,7 +320,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, // No object specified { as: &crv1alpha1.ActionSet{ @@ -331,7 +331,7 @@ func (s *ValidateSuite) TestActionSet(c *C) { }, }, }, - checker: NotNil, + checker: check.NotNil, }, } { err := ActionSet(tc.as) @@ -339,24 +339,24 @@ func (s *ValidateSuite) TestActionSet(c *C) { } } -func (s *ValidateSuite) TestActionSetStatus(c *C) { +func (s *ValidateSuite) TestActionSetStatus(c *check.C) { for _, tc := range []struct { as *crv1alpha1.ActionSetStatus - checker Checker + checker check.Checker }{ { as: nil, - checker: IsNil, + checker: 
check.IsNil, }, { as: &crv1alpha1.ActionSetStatus{}, - checker: NotNil, + checker: check.NotNil, }, { as: &crv1alpha1.ActionSetStatus{ State: crv1alpha1.StatePending, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSetStatus{ @@ -365,7 +365,7 @@ func (s *ValidateSuite) TestActionSetStatus(c *C) { {}, }, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSetStatus{ @@ -374,7 +374,7 @@ func (s *ValidateSuite) TestActionSetStatus(c *C) { {}, }, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSetStatus{ @@ -385,7 +385,7 @@ func (s *ValidateSuite) TestActionSetStatus(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSetStatus{ @@ -398,7 +398,7 @@ func (s *ValidateSuite) TestActionSetStatus(c *C) { }, }, }, - checker: NotNil, + checker: check.NotNil, }, { as: &crv1alpha1.ActionSetStatus{ @@ -411,7 +411,7 @@ func (s *ValidateSuite) TestActionSetStatus(c *C) { }, }, }, - checker: NotNil, + checker: check.NotNil, }, { as: &crv1alpha1.ActionSetStatus{ @@ -426,7 +426,7 @@ func (s *ValidateSuite) TestActionSetStatus(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSetStatus{ @@ -441,7 +441,7 @@ func (s *ValidateSuite) TestActionSetStatus(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, { as: &crv1alpha1.ActionSetStatus{ @@ -456,7 +456,7 @@ func (s *ValidateSuite) TestActionSetStatus(c *C) { }, }, }, - checker: NotNil, + checker: check.NotNil, }, { as: &crv1alpha1.ActionSetStatus{ @@ -474,7 +474,7 @@ func (s *ValidateSuite) TestActionSetStatus(c *C) { }, }, }, - checker: NotNil, + checker: check.NotNil, }, } { err := actionSetStatus(tc.as) @@ -482,15 +482,15 @@ func (s *ValidateSuite) TestActionSetStatus(c *C) { } } -func (s *ValidateSuite) TestBlueprint(c *C) { +func (s *ValidateSuite) TestBlueprint(c *check.C) { err := Blueprint(nil) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } -func (s *ValidateSuite) TestProfileSchema(c *C) { +func (s *ValidateSuite) TestProfileSchema(c *check.C) { tcs := []struct { profile *crv1alpha1.Profile - checker Checker + checker check.Checker }{ { profile: &crv1alpha1.Profile{ @@ -505,7 +505,7 @@ func (s *ValidateSuite) TestProfileSchema(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, { profile: &crv1alpha1.Profile{ @@ -524,7 +524,7 @@ func (s *ValidateSuite) TestProfileSchema(c *C) { }, }, }, - checker: IsNil, + checker: check.IsNil, }, // Missing secret namespace { @@ -539,7 +539,7 @@ func (s *ValidateSuite) TestProfileSchema(c *C) { }, }, }, - checker: NotNil, + checker: check.NotNil, }, // Missing secret name { @@ -554,7 +554,7 @@ func (s *ValidateSuite) TestProfileSchema(c *C) { }, }, }, - checker: NotNil, + checker: check.NotNil, }, // Missing secret field { @@ -573,7 +573,7 @@ func (s *ValidateSuite) TestProfileSchema(c *C) { }, }, }, - checker: NotNil, + checker: check.NotNil, }, // Missing id field { @@ -592,7 +592,7 @@ func (s *ValidateSuite) TestProfileSchema(c *C) { }, }, }, - checker: NotNil, + checker: check.NotNil, }, } @@ -602,14 +602,14 @@ func (s *ValidateSuite) TestProfileSchema(c *C) { } } -func (s *ValidateSuite) TestOsSecretFromProfile(c *C) { +func (s *ValidateSuite) TestOsSecretFromProfile(c *check.C) { ctx := context.Background() for i, tc := range []struct { pType objectstore.ProviderType p *crv1alpha1.Profile cli kubernetes.Interface expected *objectstore.Secret - errChecker Checker + errChecker check.Checker }{ { p: &crv1alpha1.Profile{ @@ -642,7 +642,7 @@ func (s 
*ValidateSuite) TestOsSecretFromProfile(c *C) { EnvironmentName: "env", }, }, - errChecker: IsNil, + errChecker: check.IsNil, }, { p: &crv1alpha1.Profile{ @@ -679,7 +679,7 @@ func (s *ValidateSuite) TestOsSecretFromProfile(c *C) { EnvironmentName: "", }, }, - errChecker: IsNil, + errChecker: check.IsNil, }, { // bad secret field err p: &crv1alpha1.Profile{ @@ -709,7 +709,7 @@ func (s *ValidateSuite) TestOsSecretFromProfile(c *C) { }, }), expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, { // bad id field err p: &crv1alpha1.Profile{ @@ -739,7 +739,7 @@ func (s *ValidateSuite) TestOsSecretFromProfile(c *C) { }, }), expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, { // missing secret p: &crv1alpha1.Profile{ @@ -758,7 +758,7 @@ func (s *ValidateSuite) TestOsSecretFromProfile(c *C) { pType: objectstore.ProviderTypeAzure, cli: fake.NewSimpleClientset(), expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, { // missing keypair p: &crv1alpha1.Profile{ @@ -770,7 +770,7 @@ func (s *ValidateSuite) TestOsSecretFromProfile(c *C) { pType: objectstore.ProviderTypeAzure, cli: fake.NewSimpleClientset(), expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, { // missing secret p: &crv1alpha1.Profile{ @@ -785,11 +785,11 @@ func (s *ValidateSuite) TestOsSecretFromProfile(c *C) { pType: objectstore.ProviderTypeAzure, cli: fake.NewSimpleClientset(), expected: nil, - errChecker: NotNil, + errChecker: check.NotNil, }, } { secret, err := osSecretFromProfile(ctx, tc.pType, tc.p, tc.cli) - c.Check(secret, DeepEquals, tc.expected, Commentf("test number: %d", i)) + c.Check(secret, check.DeepEquals, tc.expected, check.Commentf("test number: %d", i)) c.Check(err, tc.errChecker) } } diff --git a/pkg/virtualfs/directory_test.go b/pkg/virtualfs/directory_test.go index a593ef4671..f17d4bcc15 100644 --- a/pkg/virtualfs/directory_test.go +++ b/pkg/virtualfs/directory_test.go @@ -18,7 +18,7 @@ import ( "context" "os" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" ) const ( @@ -28,108 +28,108 @@ const ( type DirectorySuite struct{} -var _ = Suite(&DirectorySuite{}) +var _ = check.Suite(&DirectorySuite{}) -func (s *DirectorySuite) TestAddDir(c *C) { +func (s *DirectorySuite) TestAddDir(c *check.C) { sourceDir, err := NewDirectory("root") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) // New directory dir, err := sourceDir.AddDir("d1", defaultPermissions) - c.Assert(err, IsNil) - c.Assert(dir.Name(), Equals, "d1") + c.Assert(err, check.IsNil) + c.Assert(dir.Name(), check.Equals, "d1") // Duplicate directory _, err = sourceDir.AddDir("d1", defaultPermissions) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) // Invalid name _, err = sourceDir.AddDir("/d2", defaultPermissions) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (s *DirectorySuite) TestAddAllDirs(c *C) { +func (s *DirectorySuite) TestAddAllDirs(c *check.C) { sourceDir, err := NewDirectory("root") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) c.Log("Add a directory - root/d1") subdir, err := sourceDir.AddAllDirs("d1", defaultPermissions) - c.Assert(err, IsNil) - c.Assert(subdir.Name(), Equals, "d1") + c.Assert(err, check.IsNil) + c.Assert(subdir.Name(), check.Equals, "d1") d1, err := sourceDir.Subdir("d1") - c.Assert(err, IsNil) - c.Assert(d1, NotNil) - c.Assert(d1.Name(), Equals, "d1") - c.Assert(d1.Mode(), Equals, dirPermissions) + c.Assert(err, check.IsNil) + c.Assert(d1, check.NotNil) + c.Assert(d1.Name(), check.Equals, "d1") + c.Assert(d1.Mode(), check.Equals, dirPermissions) c.Log("Add a sub-dir under an existing directory - root/d1/d2") subdir, err = sourceDir.AddAllDirs("d1/d2", defaultPermissions) - c.Assert(err, IsNil) - c.Assert(subdir.Name(), Equals, "d2") + c.Assert(err, check.IsNil) + c.Assert(subdir.Name(), check.Equals, "d2") d2, err := d1.Subdir("d2") - c.Assert(err, IsNil) - c.Assert(d2, NotNil) - c.Assert(d2.Name(), Equals, "d2") - c.Assert(d2.Mode(), Equals, dirPermissions) + c.Assert(err, check.IsNil) + c.Assert(d2, check.NotNil) + c.Assert(d2.Name(), check.Equals, "d2") + c.Assert(d2.Mode(), check.Equals, dirPermissions) c.Log("Add third/fourth level dirs - root/d1/d3/d4") subdir, err = sourceDir.AddAllDirs("d1/d3/d4", defaultPermissions) - c.Assert(err, IsNil) - c.Assert(subdir.Name(), Equals, "d4") + c.Assert(err, check.IsNil) + c.Assert(subdir.Name(), check.Equals, "d4") d3, err := d1.Subdir("d3") - c.Assert(err, IsNil) - c.Assert(d3, NotNil) - c.Assert(d3.Name(), Equals, "d3") - c.Assert(d3.Mode(), Equals, dirPermissions) + c.Assert(err, check.IsNil) + c.Assert(d3, check.NotNil) + c.Assert(d3.Name(), check.Equals, "d3") + c.Assert(d3.Mode(), check.Equals, dirPermissions) d4, err := d3.Subdir("d4") - c.Assert(err, IsNil) - c.Assert(d4, NotNil) - c.Assert(d4.Name(), Equals, "d4") - c.Assert(d4.Mode(), Equals, dirPermissions) + c.Assert(err, check.IsNil) + c.Assert(d4, check.NotNil) + c.Assert(d4.Name(), check.Equals, "d4") + c.Assert(d4.Mode(), check.Equals, dirPermissions) c.Log("Fail adding directory under a file - root/f1/d6") f, err := AddFileWithContent(sourceDir, "f1", []byte("test"), defaultPermissions, defaultPermissions) - c.Assert(err, IsNil) - c.Assert(f.Name(), Equals, "f1") + c.Assert(err, check.IsNil) + c.Assert(f.Name(), check.Equals, "f1") _, err = sourceDir.AddAllDirs("f1/d6", defaultPermissions) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (s *DirectorySuite) TestAddFile(c *C) { +func (s *DirectorySuite) TestAddFile(c *check.C) { sourceDir, err := 
NewDirectory("root") - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) c.Log("Add file with stream source - root/f1") f, err := AddFileWithStreamSource(sourceDir, "f1", "http://test-endpoint", defaultPermissions, defaultPermissions) - c.Assert(err, IsNil) - c.Assert(f, NotNil) - c.Assert(f.Name(), Equals, "f1") + c.Assert(err, check.IsNil) + c.Assert(f, check.NotNil) + c.Assert(f.Name(), check.Equals, "f1") c.Log("Add file with stream source at third level - root/d1/f2") f, err = AddFileWithStreamSource(sourceDir, "d1/f2", "http://test-endpoint", defaultPermissions, defaultPermissions) - c.Assert(err, IsNil) - c.Assert(f, NotNil) - c.Assert(f.Name(), Equals, "f2") + c.Assert(err, check.IsNil) + c.Assert(f, check.NotNil) + c.Assert(f.Name(), check.Equals, "f2") d1, err := sourceDir.Subdir("d1") - c.Assert(err, IsNil) - c.Assert(d1, NotNil) - c.Assert(d1.Name(), Equals, "d1") - c.Assert(d1.Mode(), Equals, dirPermissions) + c.Assert(err, check.IsNil) + c.Assert(d1, check.NotNil) + c.Assert(d1.Name(), check.Equals, "d1") + c.Assert(d1.Mode(), check.Equals, dirPermissions) e, err := d1.Child(context.Background(), "f2") - c.Assert(err, IsNil) - c.Assert(e, NotNil) + c.Assert(err, check.IsNil) + c.Assert(e, check.NotNil) c.Log("Add file with content at third level - root/d2/f3") f, err = AddFileWithContent(sourceDir, "d2/f3", []byte("test"), defaultPermissions, defaultPermissions) - c.Assert(err, IsNil) - c.Assert(f, NotNil) - c.Assert(f.Name(), Equals, "f3") + c.Assert(err, check.IsNil) + c.Assert(f, check.NotNil) + c.Assert(f.Name(), check.Equals, "f3") d2, err := sourceDir.Subdir("d2") - c.Assert(err, IsNil) - c.Assert(d2, NotNil) - c.Assert(d2.Name(), Equals, "d2") - c.Assert(d2.Mode(), Equals, dirPermissions) + c.Assert(err, check.IsNil) + c.Assert(d2, check.NotNil) + c.Assert(d2.Name(), check.Equals, "d2") + c.Assert(d2.Mode(), check.Equals, dirPermissions) e, err = d2.Child(context.Background(), "f3") - c.Assert(err, IsNil) - c.Assert(e, NotNil) + c.Assert(err, check.IsNil) + c.Assert(e, check.NotNil) } diff --git a/pkg/virtualfs/virtualfs_test.go b/pkg/virtualfs/virtualfs_test.go index d38fb9ff65..367bcc6547 100644 --- a/pkg/virtualfs/virtualfs_test.go +++ b/pkg/virtualfs/virtualfs_test.go @@ -17,36 +17,36 @@ package virtualfs import ( "testing" - . 
"gopkg.in/check.v1" + "gopkg.in/check.v1" ) -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } type VirtualFSSuite struct{} -var _ = Suite(&VirtualFSSuite{}) +var _ = check.Suite(&VirtualFSSuite{}) -func (s *VirtualFSSuite) TestNewDirectory(c *C) { +func (s *VirtualFSSuite) TestNewDirectory(c *check.C) { for _, tc := range []struct { caseName string rootName string - checker Checker + checker check.Checker }{ { caseName: "Root Directory success", rootName: "root", - checker: IsNil, + checker: check.IsNil, }, { caseName: "Root directory with `/`", rootName: "/root", - checker: NotNil, + checker: check.NotNil, }, } { r, err := NewDirectory(tc.rootName) - c.Check(err, tc.checker, Commentf("Case %s failed", tc.caseName)) + c.Check(err, tc.checker, check.Commentf("Case %s failed", tc.caseName)) if err == nil { - c.Check(r.Name(), Equals, tc.rootName, Commentf("Case %s failed", tc.caseName)) + c.Check(r.Name(), check.Equals, tc.rootName, check.Commentf("Case %s failed", tc.caseName)) } } } diff --git a/releasenotes/notes/actionset-podlabels-annotations-915f1dfa7ee86978.yaml b/releasenotes/notes/actionset-podlabels-annotations-915f1dfa7ee86978.yaml deleted file mode 100644 index 2fa0c1d4cf..0000000000 --- a/releasenotes/notes/actionset-podlabels-annotations-915f1dfa7ee86978.yaml +++ /dev/null @@ -1 +0,0 @@ -features: Added two new fields, ``podLabels`` and ``podAnnotations``, to the ActionSet. These fields can be used to configure the labels and annotations of the Kanister function pod run by an ActionSet. diff --git a/releasenotes/notes/label-annotations-to-functions-903e5ffdff79a415.yaml b/releasenotes/notes/label-annotations-to-functions-903e5ffdff79a415.yaml deleted file mode 100644 index 113bcaedf7..0000000000 --- a/releasenotes/notes/label-annotations-to-functions-903e5ffdff79a415.yaml +++ /dev/null @@ -1 +0,0 @@ -features: Added support to customise the labels and annotations of the temporary pods that are created by some Kanister functions. diff --git a/releasenotes/notes/limit-rbac-kanister-operator-3c933af021b8d48a.yaml b/releasenotes/notes/limit-rbac-kanister-operator-3c933af021b8d48a.yaml new file mode 100644 index 0000000000..5b0bc23e5b --- /dev/null +++ b/releasenotes/notes/limit-rbac-kanister-operator-3c933af021b8d48a.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - Users upgrading from previous versions should note that the `edit` `ClusterRoleBinding` is no longer included by default. They must now create their own `Role` / `RoleBinding` with appropriate permissions for Kanister's Service Account in the application's namespace. +security: + - Enhanced security by removing the default `edit` `ClusterRoleBinding` assignment, minimizing the risk of excessive permissions. diff --git a/releasenotes/notes/postgress-tools-image-override-4882c70780e8a496.yaml b/releasenotes/notes/postgress-tools-image-override-4882c70780e8a496.yaml deleted file mode 100644 index 8a2ccd56af..0000000000 --- a/releasenotes/notes/postgress-tools-image-override-4882c70780e8a496.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -features: Support ``image`` argument for ``ExportRDSSnapshotToLocation`` and ``RestoreRDSSnapshot`` functions to override default postgres-kanister-tools image. 
diff --git a/releasenotes/notes/pre-release-0.111.0-478149ddf5d56f80.yaml b/releasenotes/notes/pre-release-0.111.0-478149ddf5d56f80.yaml new file mode 100644 index 0000000000..4209697002 --- /dev/null +++ b/releasenotes/notes/pre-release-0.111.0-478149ddf5d56f80.yaml @@ -0,0 +1,13 @@ +--- +features: + - Add support for Read-Only and Write Access Modes when connecting to the Kopia Repository Server in ``kando``. + - Add support for Cache Size Limits to the ``kopia server start`` command. + - Add support to pass labels and annotations to the methods that create/clone VolumeSnapshot and VolumeSnapshotContent resources. + - Support ``image`` argument for ``ExportRDSSnapshotToLocation`` and ``RestoreRDSSnapshot`` functions to override default postgres-kanister-tools image. + - Added support to customise the labels and annotations of the temporary pods that are created by some Kanister functions. + - Added two new fields, ``podLabels`` and ``podAnnotations``, to the ActionSet. These fields can be used to configure the labels and annotations of the Kanister function pod run by an ActionSet. +security: + - Update Go to 1.22.7 to pull in latest security updates. +other: + - Update ubi-minimal base image to ubi-minimal:9.4-1227.1725849298. + - Add ``stylecheck``, ``errcheck``, and ``misspell`` linters to test packages. diff --git a/releasenotes/notes/support-annotation-on-snapshotter-function-ff9b7ba2daf10427.yaml b/releasenotes/notes/support-annotation-on-snapshotter-function-ff9b7ba2daf10427.yaml deleted file mode 100644 index 989f786dd2..0000000000 --- a/releasenotes/notes/support-annotation-on-snapshotter-function-ff9b7ba2daf10427.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -features: Add support to pass labels and annotations to the methods that create/clone VolumeSnapshot and VolumeSnapshotContent resources. diff --git a/scripts/get.sh b/scripts/get.sh index 18d7e370ab..88291c0c08 100755 --- a/scripts/get.sh +++ b/scripts/get.sh @@ -140,7 +140,7 @@ cleanup() { } main() { - version="${1:-"0.110.0"}" + version="${1:-"0.111.0"}" initArch initOS verifySupported