From d0a47baa09a6a4c0eb728f25e58966dba99fe36b Mon Sep 17 00:00:00 2001
From: jbtrystram
Date: Fri, 6 Sep 2024 15:58:37 +0200
Subject: [PATCH] jobs/garbage-collection: add containers

Add containers tags in the garbage collection job.
These can run in parallel without issues.
---
 jobs/garbage-collection.Jenkinsfile | 81 +++++++++++++++++++++--------
 1 file changed, 49 insertions(+), 32 deletions(-)

diff --git a/jobs/garbage-collection.Jenkinsfile b/jobs/garbage-collection.Jenkinsfile
index 1ca18f84..766e3333 100644
--- a/jobs/garbage-collection.Jenkinsfile
+++ b/jobs/garbage-collection.Jenkinsfile
@@ -54,38 +54,55 @@ lock(resource: "gc-${params.STREAM}") {
         def originalTimestamp = originalBuildsJson.timestamp
         def acl = pipecfg.s3.acl ?: 'public-read'
 
-        withCredentials([file(variable: 'GCP_KOLA_TESTS_CONFIG', credentialsId: 'gcp-image-upload-config')]) {
-            stage('Garbage Collection') {
-                pipeutils.shwrapWithAWSBuildUploadCredentials("""
-                    cosa cloud-prune --policy ${new_gc_policy_path} \
-                        --stream ${params.STREAM} ${dry_run} \
-                        --gcp-json-key=\${GCP_KOLA_TESTS_CONFIG} \
-                        --acl=${acl} \
-                        --aws-config-file \${AWS_BUILD_UPLOAD_CONFIG}
-                """)
-            }
-        }
-
-        def currentBuildsJson = readJSON file: 'builds/builds.json'
-        def currentTimestamp = currentBuildsJson.timestamp
-
-        // If the timestamp on builds.json after the 'Garbage Collection' step
-        // is the same as before, that means, there were no resources to be pruned
-        // and hence, no need to update the builds.json.
-        if (originalTimestamp != currentTimestamp) {
-            // Nested lock for the Upload Builds JSON step
-            lock(resource: "builds-json-${params.STREAM}") {
-                stage('Upload Builds JSON') {
-                    pipeutils.shwrapWithAWSBuildUploadCredentials("""
-                        cosa cloud-prune --policy ${new_gc_policy_path} \
-                            --stream ${params.STREAM} \
-                            --upload-builds-json ${dry_run} \
-                            --acl=${acl} \
-                            --aws-config-file \${AWS_BUILD_UPLOAD_CONFIG}
-                    """)
-                }
-            }
-        }
+        // containers tags and cloud artifacts can be GCed in parallel.
+        // NOTE: this is a scripted pipeline, so `parallel` takes a map of
+        // branch-name -> closure (not a declarative `stages {}` section).
+        parallel "Cloud artifacts GC": {
+            withCredentials([file(variable: 'GCP_KOLA_TESTS_CONFIG', credentialsId: 'gcp-image-upload-config')]) {
+                stage('Cloud artifacts GC') {
+                    pipeutils.shwrapWithAWSBuildUploadCredentials("""
+                        cosa cloud-prune --policy ${new_gc_policy_path} \
+                            --stream ${params.STREAM} ${dry_run} \
+                            --gcp-json-key=\${GCP_KOLA_TESTS_CONFIG} \
+                            --acl=${acl} \
+                            --aws-config-file \${AWS_BUILD_UPLOAD_CONFIG}
+                    """)
+                }
+            }
+
+            def currentBuildsJson = readJSON file: 'builds/builds.json'
+            def currentTimestamp = currentBuildsJson.timestamp
+
+            // If the timestamp on builds.json after the 'Cloud artifacts GC'
+            // step is the same as before, that means there were no resources
+            // to be pruned and hence no need to update the builds.json.
+            if (originalTimestamp != currentTimestamp) {
+                // Nested lock for the Upload Builds JSON step
+                lock(resource: "builds-json-${params.STREAM}") {
+                    stage('Upload Builds JSON') {
+                        pipeutils.shwrapWithAWSBuildUploadCredentials("""
+                            cosa cloud-prune --policy ${new_gc_policy_path} \
+                                --stream ${params.STREAM} \
+                                --upload-builds-json ${dry_run} \
+                                --acl=${acl} \
+                                --aws-config-file \${AWS_BUILD_UPLOAD_CONFIG}
+                        """)
+                    }
+                }
+            }
+        }, "Container tags GC": {
+            stage('Container tags GC') {
+                // get the container repo url from pipecfg
+                def repo = pipecfg.registry_repos.oscontainer.repo
+                withCredentials([file(variable: 'REGISTRY_SECRET',
+                                      credentialsId: 'oscontainer-push-registry-secret')]) {
+                    pipeutils.shwrap("""
+                        cosa container-prune --policy ${new_gc_policy_path} \
+                            --registry-auth-file=\${REGISTRY_SECRET} \
+                            --stream ${params.STREAM} ${dry_run} \
+                            ${repo}
+                    """)
+                }
+            }
+        }
 
         currentBuild.result = 'SUCCESS'
         currentBuild.description = "${build_description} ✓"