From bdd089045f1dc1603e85b3d84bf0e40740b0e8b0 Mon Sep 17 00:00:00 2001
From: ecpullen
Date: Tue, 23 Jan 2024 18:28:45 +0000
Subject: [PATCH 1/4] controller: Run destruction if creation starts

---
 controller/src/resource_controller/action.rs | 15 +++++++++++++--
 controller/src/resource_controller/mod.rs    | 18 +++++++++++++++++-
 model/src/constants.rs                       |  1 +
 3 files changed, 31 insertions(+), 3 deletions(-)

diff --git a/controller/src/resource_controller/action.rs b/controller/src/resource_controller/action.rs
index d4a09dcc..cf0a0dce 100644
--- a/controller/src/resource_controller/action.rs
+++ b/controller/src/resource_controller/action.rs
@@ -6,7 +6,9 @@ use kube::core::object::HasSpec;
 use kube::ResourceExt;
 use log::{debug, trace};
 use testsys_model::clients::{AllowNotFound, CrdClient, TestClient};
-use testsys_model::constants::{FINALIZER_CREATION_JOB, FINALIZER_MAIN, FINALIZER_RESOURCE};
+use testsys_model::constants::{
+    FINALIZER_CLEANUP_REQUIRED, FINALIZER_CREATION_JOB, FINALIZER_MAIN, FINALIZER_RESOURCE,
+};
 use testsys_model::{CrdExt, DestructionPolicy, ResourceAction, TaskState, TestUserState};
 
 /// The action that the controller needs to take in order to reconcile the [`Resource`].
@@ -21,6 +23,7 @@ pub(super) enum CreationAction {
     Initialize,
     AddMainFinalizer,
     AddJobFinalizer,
+    AddCleanupFinalizer,
     StartJob,
     WaitForDependency(String),
     WaitForConflict(String),
@@ -39,6 +42,7 @@ pub(super) enum DestructionAction {
     StartDestructionJob,
     Wait,
     RemoveDestructionJob,
+    RemoveCleanupFinalizer,
     RemoveResourceFinalizer,
     RemoveMainFinalizer,
     Error(ErrorState),
@@ -167,6 +171,9 @@ async fn creation_not_done_action(
     if !is_task_state_running && !r.resource().has_finalizer(FINALIZER_CREATION_JOB) {
         return Ok(CreationAction::AddJobFinalizer);
     }
+    if !is_task_state_running && !r.resource().has_finalizer(FINALIZER_CLEANUP_REQUIRED) {
+        return Ok(CreationAction::AddCleanupFinalizer);
+    }
     let job_state = r.get_job_state(ResourceAction::Create).await?;
     match job_state {
         JobState::None if !is_task_state_running => Ok(CreationAction::StartJob),
@@ -254,7 +261,9 @@ async fn destruction_action(r: &ResourceInterface) -> Result<DestructionAction>
         Ok(DestructionAction::StartResourceDeletion)
     } else if let Some(creation_cleanup_action) = creation_cleanup_action(r).await?
{ Ok(creation_cleanup_action) - } else if r.resource().has_finalizer(FINALIZER_RESOURCE) { + } else if r.resource().has_finalizer(FINALIZER_RESOURCE) + || r.resource().has_finalizer(FINALIZER_CLEANUP_REQUIRED) + { destruction_action_with_resources(r).await } else { destruction_action_without_resources(r).await @@ -300,6 +309,8 @@ async fn destruction_action_with_resources(r: &ResourceInterface) -> Result Res .await .with_context(|| format!("Unable to creation job finalizer to '{}'", r.name()))?; } + CreationAction::AddCleanupFinalizer => { + let _ = r + .resource_client() + .add_finalizer(FINALIZER_CLEANUP_REQUIRED, r.resource()) + .await + .with_context(|| format!("Unable to add resource finalizer to '{}'", r.name()))?; + } CreationAction::StartJob => r.start_job(ResourceAction::Create).await?, CreationAction::WaitForCreation => { debug!("waiting for creation of resource '{}'", r.name()) @@ -148,6 +156,14 @@ async fn do_destruction_action(r: ResourceInterface, action: DestructionAction) DestructionAction::RemoveDestructionJob => { r.remove_job(ResourceAction::Destroy).await?; } + DestructionAction::RemoveCleanupFinalizer => { + r.resource_client() + .remove_finalizer(FINALIZER_CLEANUP_REQUIRED, r.resource()) + .await + .with_context(|| { + format!("Unable to cleanup resource finalizer from '{}'", r.name()) + })?; + } DestructionAction::RemoveResourceFinalizer => { r.resource_client() .remove_finalizer(FINALIZER_RESOURCE, r.resource()) diff --git a/model/src/constants.rs b/model/src/constants.rs index 243c3832..a4c9836d 100644 --- a/model/src/constants.rs +++ b/model/src/constants.rs @@ -54,6 +54,7 @@ pub const APP_CREATED_BY: &str = "app.kubernetes.io/created-by"; pub const FINALIZER_CREATION_JOB: &str = testsys!("resource-creation-job"); pub const FINALIZER_MAIN: &str = testsys!("controlled"); pub const FINALIZER_RESOURCE: &str = testsys!("resources-exist"); +pub const FINALIZER_CLEANUP_REQUIRED: &str = testsys!("resources-cleanup-required"); pub const FINALIZER_TEST_JOB: &str = testsys!("test-job"); pub const TESTSYS_RESULTS_FILE: &str = "/output.tar.gz"; From e40e6ab24058dcb8f93fa9c6ed3b16fa3864bebc Mon Sep 17 00:00:00 2001 From: ecpullen Date: Tue, 23 Jan 2024 18:47:17 +0000 Subject: [PATCH 2/4] karpenter: Add best effort cleanup --- .../ec2_karpenter_provider.rs | 96 ++++++++++++------- 1 file changed, 64 insertions(+), 32 deletions(-) diff --git a/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/ec2_karpenter_provider.rs b/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/ec2_karpenter_provider.rs index 39044874..386eb2d6 100644 --- a/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/ec2_karpenter_provider.rs +++ b/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/ec2_karpenter_provider.rs @@ -54,6 +54,10 @@ pub struct ProductionMemo { /// Name of the cluster the EC2 instances are for pub cluster_name: String, + + pub tainted_nodegroup_exists: bool, + pub karpenter_namespace_exists: bool, + pub inflate_deployment_exists: bool, } impl Configuration for ProductionMemo {} @@ -331,6 +335,12 @@ impl Create for Ec2KarpenterCreator { format!("Failed to create nodegroup with status code {}", status), )); } + memo.tainted_nodegroup_exists = true; + memo.current_status = "Tainting nodegroup".to_string(); + client + .send_info(memo.clone()) + .await + .context(resources, "Error sending message")?; info!("Applying node taint and scaling nodegroup"); eks_client @@ -392,6 +402,13 @@ impl Create for Ec2KarpenterCreator { )); } + memo.karpenter_namespace_exists = true; 
+ memo.current_status = "Karpenter Installed".to_string(); + client + .send_info(memo.clone()) + .await + .context(resources, "Error sending message")?; + info!("Karpenter has been installed to the cluster. Creating EC2 provisioner"); let requirements = if spec.configuration.instance_types.is_empty() { @@ -535,6 +552,13 @@ spec: .await .context(resources, "Unable to create deployment")?; + memo.inflate_deployment_exists = true; + memo.current_status = "Waiting for Karpenter Nodes".to_string(); + client + .send_info(memo.clone()) + .await + .context(resources, "Error sending message")?; + info!("Waiting for new nodes to be created"); tokio::time::timeout( Duration::from_secs(600), @@ -886,6 +910,10 @@ impl Destroy for Ec2KarpenterDestroyer { })?; let resources = Resources::Remaining; + if !memo.tainted_nodegroup_exists { + return Ok(()); + } + info!("Getting AWS secret"); memo.current_status = "Getting AWS secret".to_string(); client @@ -970,41 +998,45 @@ impl Destroy for Ec2KarpenterDestroyer { "Unable to create k8s client from cluster kubeconfig", )?; - info!("Deleting inflate deployment"); - let deployment_api = Api::::namespaced(k8s_client.clone(), "default"); - deployment_api - .delete("inflate", &Default::default()) - .await - .context(resources, "Unable to delete deployment")?; + if memo.inflate_deployment_exists { + info!("Deleting inflate deployment"); + let deployment_api = Api::::namespaced(k8s_client.clone(), "default"); + deployment_api + .delete("inflate", &Default::default()) + .await + .context(resources, "Unable to delete deployment")?; - let node_api = Api::::all(k8s_client); + let node_api = Api::::all(k8s_client); - info!("Waiting for karpenter nodes to be cleaned up"); - tokio::time::timeout( - Duration::from_secs(600), - wait_for_nodes(&node_api, 2, Ordering::Equal), - ) - .await - .context( - resources, - "Timed out waiting for karpenter nodes to join the cluster", - )??; - - info!("Uninstalling karpenter"); - let status = Command::new("helm") - .env("KUBECONFIG", CLUSTER_KUBECONFIG) - .args(["uninstall", "karpenter", "--namespace", "karpenter"]) - .status() - .context(Resources::Remaining, "Failed to create helm template")?; + info!("Waiting for karpenter nodes to be cleaned up"); + tokio::time::timeout( + Duration::from_secs(600), + wait_for_nodes(&node_api, 2, Ordering::Equal), + ) + .await + .context( + resources, + "Timed out waiting for karpenter nodes to leave the cluster", + )??; + } - if !status.success() { - return Err(ProviderError::new_with_context( - Resources::Remaining, - format!( - "Failed to launch karpenter template with status code {}", - status - ), - )); + if memo.karpenter_namespace_exists { + info!("Uninstalling karpenter"); + let status = Command::new("helm") + .env("KUBECONFIG", CLUSTER_KUBECONFIG) + .args(["uninstall", "karpenter", "--namespace", "karpenter"]) + .status() + .context(Resources::Remaining, "Failed to create helm template")?; + + if !status.success() { + return Err(ProviderError::new_with_context( + Resources::Remaining, + format!( + "Failed to launch karpenter template with status code {}", + status + ), + )); + } } info!("Deleting tainted nodegroup"); From e17f596f00761df3532f58d9df593b5aa9e566ea Mon Sep 17 00:00:00 2001 From: ecpullen Date: Tue, 23 Jan 2024 18:50:48 +0000 Subject: [PATCH 3/4] eks: Add check for provisioning cluster --- .../agents/src/bin/eks-resource-agent/eks_provider.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/bottlerocket/agents/src/bin/eks-resource-agent/eks_provider.rs 
b/bottlerocket/agents/src/bin/eks-resource-agent/eks_provider.rs index 469868c6..85783eb4 100644 --- a/bottlerocket/agents/src/bin/eks-resource-agent/eks_provider.rs +++ b/bottlerocket/agents/src/bin/eks-resource-agent/eks_provider.rs @@ -49,6 +49,8 @@ pub struct ProductionMemo { // The role arn that is being assumed. pub assume_role: Option, + + pub provisioning_started: bool, } impl Configuration for ProductionMemo {} @@ -380,6 +382,7 @@ impl Create for EksCreator { if do_create { info!("Creating cluster with eksctl"); memo.current_status = "Creating cluster".to_string(); + memo.provisioning_started = true; client .send_info(memo.clone()) .await @@ -897,8 +900,12 @@ impl Destroy for EksDestroyer { .await .context(Resources::Remaining, "Unable to get info from client")?; + if !memo.provisioning_started { + return Ok(()); + } + let cluster_name = match &memo.cluster_name { - Some(x) => x, + Some(cluster_name) => cluster_name, None => { return Err(ProviderError::new_with_context( Resources::Unknown, From 76b3c76b98bdd5781493ef67a79a3f7a2935e8e3 Mon Sep 17 00:00:00 2001 From: ecpullen Date: Tue, 23 Jan 2024 20:01:51 +0000 Subject: [PATCH 4/4] fix resource destruction and update karpenter --- Dockerfile | 2 + .../cloudformation.yaml | 331 +++++++++ .../ec2_karpenter_provider.rs | 661 ++++++++++-------- bottlerocket/types/src/agent_config.rs | 3 + controller/src/resource_controller/action.rs | 5 +- 5 files changed, 702 insertions(+), 300 deletions(-) create mode 100644 bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/cloudformation.yaml diff --git a/Dockerfile b/Dockerfile index e4f2169c..45b0aa91 100644 --- a/Dockerfile +++ b/Dockerfile @@ -90,6 +90,8 @@ COPY --from=tools /licenses/helm /licenses/helm # Copy ec2-karpenter-resource-agent COPY --from=build-src /src/bottlerocket/agents/bin/ec2-karpenter-resource-agent ./ +# Copy cloudformation template +COPY --from=build-src /src/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/cloudformation.yaml /local/cloudformation.yaml COPY --from=build-src /usr/share/licenses/testsys /licenses/testsys ENTRYPOINT ["./ec2-karpenter-resource-agent"] diff --git a/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/cloudformation.yaml b/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/cloudformation.yaml new file mode 100644 index 00000000..9c1a993c --- /dev/null +++ b/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/cloudformation.yaml @@ -0,0 +1,331 @@ +AWSTemplateFormatVersion: "2010-09-09" +Description: Resources used by https://github.com/aws/karpenter +Parameters: + ClusterName: + Type: String + Description: "EKS cluster name" +Resources: + KarpenterNodeRole: + Type: "AWS::IAM::Role" + Properties: + RoleName: !Sub "KarpenterNodeRole-${ClusterName}" + Path: / + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Principal: + Service: + !Sub "ec2.${AWS::URLSuffix}" + Action: + - "sts:AssumeRole" + ManagedPolicyArns: + - !Sub "arn:${AWS::Partition}:iam::aws:policy/AmazonEKS_CNI_Policy" + - !Sub "arn:${AWS::Partition}:iam::aws:policy/AmazonEKSWorkerNodePolicy" + - !Sub "arn:${AWS::Partition}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + - !Sub "arn:${AWS::Partition}:iam::aws:policy/AmazonSSMManagedInstanceCore" + KarpenterControllerPolicy: + Type: AWS::IAM::ManagedPolicy + Properties: + ManagedPolicyName: !Sub "KarpenterControllerPolicy-${ClusterName}" + # The PolicyDocument must be in JSON string format because we use a StringEquals condition that uses an interpolated + # value in 
one of its key parameters which isn't natively supported by CloudFormation + PolicyDocument: !Sub | + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowScopedEC2InstanceActions", + "Effect": "Allow", + "Resource": [ + "arn:${AWS::Partition}:ec2:${AWS::Region}::image/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}::snapshot/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:security-group/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:subnet/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*" + ], + "Action": [ + "ec2:RunInstances", + "ec2:CreateFleet" + ] + }, + { + "Sid": "AllowScopedEC2InstanceActionsWithTags", + "Effect": "Allow", + "Resource": [ + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:fleet/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:instance/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:volume/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:network-interface/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:spot-instances-request/*" + ], + "Action": [ + "ec2:RunInstances", + "ec2:CreateFleet", + "ec2:CreateLaunchTemplate" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned" + }, + "StringLike": { + "aws:RequestTag/karpenter.sh/nodepool": "*" + } + } + }, + { + "Sid": "AllowScopedResourceCreationTagging", + "Effect": "Allow", + "Resource": [ + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:fleet/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:instance/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:volume/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:network-interface/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:spot-instances-request/*" + ], + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "ec2:CreateAction": [ + "RunInstances", + "CreateFleet", + "CreateLaunchTemplate" + ] + }, + "StringLike": { + "aws:RequestTag/karpenter.sh/nodepool": "*" + } + } + }, + { + "Sid": "AllowScopedResourceTagging", + "Effect": "Allow", + "Resource": "arn:${AWS::Partition}:ec2:${AWS::Region}:*:instance/*", + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned" + }, + "StringLike": { + "aws:ResourceTag/karpenter.sh/nodepool": "*" + }, + "ForAllValues:StringEquals": { + "aws:TagKeys": [ + "karpenter.sh/nodeclaim", + "Name" + ] + } + } + }, + { + "Sid": "AllowScopedDeletion", + "Effect": "Allow", + "Resource": [ + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:instance/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*" + ], + "Action": [ + "ec2:TerminateInstances", + "ec2:DeleteLaunchTemplate" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned" + }, + "StringLike": { + "aws:ResourceTag/karpenter.sh/nodepool": "*" + } + } + }, + { + "Sid": "AllowRegionalReadActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "ec2:DescribeAvailabilityZones", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeInstanceTypes", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets" + ], + "Condition": { + "StringEquals": { + "aws:RequestedRegion": "${AWS::Region}" + } + } + }, + { + "Sid": "AllowSSMReadActions", + 
"Effect": "Allow", + "Resource": "arn:${AWS::Partition}:ssm:${AWS::Region}::parameter/aws/service/*", + "Action": "ssm:GetParameter" + }, + { + "Sid": "AllowPricingReadActions", + "Effect": "Allow", + "Resource": "*", + "Action": "pricing:GetProducts" + }, + { + "Sid": "AllowInterruptionQueueActions", + "Effect": "Allow", + "Resource": "${KarpenterInterruptionQueue.Arn}", + "Action": [ + "sqs:DeleteMessage", + "sqs:GetQueueUrl", + "sqs:ReceiveMessage" + ] + }, + { + "Sid": "AllowPassingInstanceRole", + "Effect": "Allow", + "Resource": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/KarpenterNodeRole-${ClusterName}", + "Action": "iam:PassRole", + "Condition": { + "StringEquals": { + "iam:PassedToService": "ec2.amazonaws.com" + } + } + }, + { + "Sid": "AllowScopedInstanceProfileCreationActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:CreateInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" + }, + "StringLike": { + "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowScopedInstanceProfileTagActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:TagInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:ResourceTag/topology.kubernetes.io/region": "${AWS::Region}", + "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" + }, + "StringLike": { + "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*", + "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowScopedInstanceProfileActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:AddRoleToInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:DeleteInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:ResourceTag/topology.kubernetes.io/region": "${AWS::Region}" + }, + "StringLike": { + "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowInstanceProfileReadActions", + "Effect": "Allow", + "Resource": "*", + "Action": "iam:GetInstanceProfile" + }, + { + "Sid": "AllowAPIServerEndpointDiscovery", + "Effect": "Allow", + "Resource": "arn:${AWS::Partition}:eks:${AWS::Region}:${AWS::AccountId}:cluster/${ClusterName}", + "Action": "eks:DescribeCluster" + } + ] + } + KarpenterInterruptionQueue: + Type: AWS::SQS::Queue + Properties: + QueueName: !Sub "${ClusterName}" + MessageRetentionPeriod: 300 + SqsManagedSseEnabled: true + KarpenterInterruptionQueuePolicy: + Type: AWS::SQS::QueuePolicy + Properties: + Queues: + - !Ref KarpenterInterruptionQueue + PolicyDocument: + Id: EC2InterruptionPolicy + Statement: + - Effect: Allow + Principal: + Service: + - events.amazonaws.com + - sqs.amazonaws.com + Action: sqs:SendMessage + Resource: !GetAtt KarpenterInterruptionQueue.Arn + ScheduledChangeRule: + Type: 'AWS::Events::Rule' + Properties: + EventPattern: + source: + - aws.health + detail-type: + - AWS Health Event + Targets: + - Id: KarpenterInterruptionQueueTarget + Arn: !GetAtt KarpenterInterruptionQueue.Arn + SpotInterruptionRule: + Type: 'AWS::Events::Rule' + Properties: + EventPattern: + source: + - aws.ec2 + detail-type: + - EC2 Spot Instance Interruption Warning + Targets: + - Id: KarpenterInterruptionQueueTarget + Arn: !GetAtt 
KarpenterInterruptionQueue.Arn + RebalanceRule: + Type: 'AWS::Events::Rule' + Properties: + EventPattern: + source: + - aws.ec2 + detail-type: + - EC2 Instance Rebalance Recommendation + Targets: + - Id: KarpenterInterruptionQueueTarget + Arn: !GetAtt KarpenterInterruptionQueue.Arn + InstanceStateChangeRule: + Type: 'AWS::Events::Rule' + Properties: + EventPattern: + source: + - aws.ec2 + detail-type: + - EC2 Instance State-change Notification + Targets: + - Id: KarpenterInterruptionQueueTarget + Arn: !GetAtt KarpenterInterruptionQueue.Arn diff --git a/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/ec2_karpenter_provider.rs b/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/ec2_karpenter_provider.rs index 386eb2d6..4891939f 100644 --- a/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/ec2_karpenter_provider.rs +++ b/bottlerocket/agents/src/bin/ec2-karpenter-resource-agent/ec2_karpenter_provider.rs @@ -1,5 +1,6 @@ use agent_utils::aws::aws_config; use agent_utils::json_display; +use aws_sdk_cloudformation::model::{Capability, Parameter, StackStatus}; use aws_sdk_ec2::model::Tag; use aws_sdk_eks::model::{ NodegroupScalingConfig, NodegroupStatus, Taint, TaintEffect, UpdateTaintsPayload, @@ -11,7 +12,7 @@ use k8s_openapi::api::apps::v1::Deployment; use k8s_openapi::api::core::v1::Node; use kube::config::{KubeConfigOptions, Kubeconfig}; use kube::{Api, Client, Config}; -use log::{debug, info}; +use log::{debug, info, warn}; use resource_agent::clients::InfoClient; use resource_agent::provider::{ Create, Destroy, IntoProviderError, ProviderError, ProviderResult, Resources, Spec, @@ -25,11 +26,13 @@ use std::fs; use std::process::Command; use std::time::Duration; use testsys_model::{Configuration, SecretName}; +use tokio::fs::read_to_string; -const KARPENTER_VERSION: &str = "v0.27.1"; +const KARPENTER_VERSION: &str = "v0.33.1"; const CLUSTER_KUBECONFIG: &str = "/local/cluster.kubeconfig"; const PROVISIONER_YAML: &str = "/local/provisioner.yaml"; const TAINTED_NODEGROUP_NAME: &str = "tainted-nodegroup"; +const TEMPLATE_PATH: &str = "/local/cloudformation.yaml"; #[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -55,6 +58,7 @@ pub struct ProductionMemo { /// Name of the cluster the EC2 instances are for pub cluster_name: String, + pub cloud_formation_stack_exists: bool, pub tainted_nodegroup_exists: bool, pub karpenter_namespace_exists: bool, pub inflate_deployment_exists: bool, @@ -91,6 +95,13 @@ impl Create for Ec2KarpenterCreator { json_display(&spec) ); + let karpenter_version = spec + .configuration + .karpenter_version + .unwrap_or_else(|| KARPENTER_VERSION.to_string()); + + let stack_name = format!("Karpenter-{}", spec.configuration.cluster_name); + let mut resources = Resources::Unknown; let mut memo: ProductionMemo = client @@ -131,7 +142,7 @@ impl Create for Ec2KarpenterCreator { let ec2_client = aws_sdk_ec2::Client::new(&shared_config); let eks_client = aws_sdk_eks::Client::new(&shared_config); let sts_client = aws_sdk_sts::Client::new(&shared_config); - let iam_client = aws_sdk_iam::Client::new(&shared_config); + let cfn_client = aws_sdk_cloudformation::Client::new(&shared_config); info!("Writing cluster's kubeconfig to {}", CLUSTER_KUBECONFIG); let status = Command::new("eksctl") @@ -164,11 +175,45 @@ impl Create for Ec2KarpenterCreator { .to_string(); info!("Using account '{account_id}'"); - info!("Checking for KarpenterInstanceNodeRole"); - create_karpenter_instance_role(&iam_client).await?; + 
memo.cloud_formation_stack_exists = true; + client + .send_info(memo.clone()) + .await + .context(resources, "Error sending cluster creation message")?; + + info!("Launching karpenter cloud formation stack"); + cfn_client + .create_stack() + .stack_name(&stack_name) + .template_body( + read_to_string(TEMPLATE_PATH) + .await + .context(Resources::Clear, "Unable to read cloudformation template")?, + ) + .capabilities(Capability::CapabilityNamedIam) + .parameters( + Parameter::builder() + .parameter_key("ClusterName") + .parameter_value(&spec.configuration.cluster_name) + .build(), + ) + .send() + .await + .context( + Resources::Remaining, + "Unable to create cloudformation stack", + )?; - info!("Checking for KarpenterControllerPolicy"); - create_controller_policy(&iam_client, &account_id).await?; + tokio::time::timeout( + Duration::from_secs(600), + wait_for_cloudformation_stack( + stack_name.to_string(), + StackStatus::CreateComplete, + &cfn_client, + ), + ) + .await + .context(resources, "Timed out waiting for cloud formation stack.")??; info!( "Adding associate-iam-oidc-provider to {}", @@ -220,7 +265,11 @@ impl Create for Ec2KarpenterCreator { ) .as_str(), "--attach-policy-arn", - format!("arn:aws:iam::{account_id}:policy/KarpenterControllerPolicy").as_str(), + format!( + "arn:aws:iam::{account_id}:policy/KarpenterControllerPolicy-{}", + &spec.configuration.cluster_name + ) + .as_str(), "--role-only", "--approve", ]) @@ -295,7 +344,10 @@ impl Create for Ec2KarpenterCreator { "--cluster", spec.configuration.cluster_name.as_str(), "--arn", - &format!("arn:aws:iam::{account_id}:role/KarpenterInstanceNodeRole"), + &format!( + "arn:aws:iam::{account_id}:role/KarpenterNodeRole-{}", + spec.configuration.cluster_name + ), "--username", "system:node:{{EC2PrivateDNSName}}", "--group", @@ -377,17 +429,30 @@ impl Create for Ec2KarpenterCreator { let status = Command::new("helm") .env("KUBECONFIG", CLUSTER_KUBECONFIG) .args([ - "upgrade", - "--install", - "--namespace", "karpenter", - "--create-namespace", "karpenter", + "upgrade", + "--install", + "karpenter", + "--namespace", + "karpenter", + "--create-namespace", "oci://public.ecr.aws/karpenter/karpenter", - "--version", KARPENTER_VERSION, - "--set", "settings.aws.defaultInstanceProfile=KarpenterInstanceNodeRole", - "--set", &format!("settings.aws.clusterEndpoint={}", spec.configuration.endpoint), - "--set", &format!("settings.aws.clusterName={}", spec.configuration.cluster_name), + "--version", + &karpenter_version, + "--set", + &format!( + "aws.defaultInstanceProfile=KarpenterNodeRole-{}", + spec.configuration.cluster_name + ), + "--set", + &format!("settings.clusterName={}", spec.configuration.cluster_name), + "--set", + &format!( + "settings.aws.clusterEndpoint={}", + spec.configuration.endpoint + ), "--set", &format!(r#"serviceAccount.annotations.eks\.amazonaws\.com/role-arn=arn:aws:iam::{account_id}:role/KarpenterControllerRole-{}"#, spec.configuration.cluster_name), - "--wait", "--debug" + "--wait", + "--debug", ]) .status() .context(Resources::Remaining, "Failed to create helm template")?; @@ -415,9 +480,9 @@ impl Create for Ec2KarpenterCreator { Default::default() } else { format!( - r#" - key: node.kubernetes.io/instance-type - operator: In - values: [{}] + r#" - key: node.kubernetes.io/instance-type + operator: In + values: [{}] "#, spec.configuration.instance_types.join(",") @@ -451,35 +516,40 @@ impl Create for Ec2KarpenterCreator { }; let provisioner = format!( - r#"apiVersion: karpenter.sh/v1alpha5 -kind: Provisioner + 
r#"apiVersion: karpenter.sh/v1beta1 +kind: NodePool metadata: name: default -spec: - ttlSecondsAfterEmpty: 1 - providerRef: - name: my-provider - requirements: - - key: kubernetes.io/arch - operator: In - values: ["arm64", "amd64"] +spec: + template: + spec: + nodeClassRef: + name: my-provider + requirements: + - key: kubernetes.io/arch + operator: In + values: ["arm64", "amd64"] {} --- -apiVersion: karpenter.k8s.aws/v1alpha1 -kind: AWSNodeTemplate +apiVersion: karpenter.k8s.aws/v1beta1 +kind: EC2NodeClass metadata: name: my-provider spec: amiFamily: Bottlerocket - amiSelector: - aws-ids: {} - subnetSelector: - karpenter.sh/discovery: {} - securityGroupSelector: - karpenter.sh/discovery: {} + role: "KarpenterNodeRole-{}" + amiSelectorTerms: + - id: {} + subnetSelectorTerms: + - tags: + karpenter.sh/discovery: {} + securityGroupSelectorTerms: + - tags: + karpenter.sh/discovery: {} {} "#, requirements, + spec.configuration.cluster_name, spec.configuration.node_ami, spec.configuration.cluster_name, spec.configuration.cluster_name, @@ -721,167 +791,6 @@ async fn wait_for_nodegroup( } } -async fn create_karpenter_instance_role(iam_client: &aws_sdk_iam::Client) -> ProviderResult<()> { - if iam_client - .get_instance_profile() - .instance_profile_name("KarpenterInstanceNodeRole") - .send() - .await - .map(|instance_profile| instance_profile.instance_profile().is_some()) - .unwrap_or_default() - { - info!("KarpenterInstanceNodeRole instance profile already exists"); - return Ok(()); - } - - if iam_client - .get_role() - .role_name("KarpenterInstanceNodeRole") - .send() - .await - .is_ok() - { - info!("KarpenterInstanceNodeRole already exists"); - } else { - info!("Creating karpenter instance role"); - iam_client - .create_role() - .role_name("KarpenterInstanceNodeRole") - .assume_role_policy_document( - r#"{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - }, - "Action": "sts:AssumeRole" - } - ] - }"#, - ) - .send() - .await - .context( - Resources::Clear, - "Unable to create KarpenterInstanceNodeRole", - )?; - let policies = vec![ - "AmazonEKSWorkerNodePolicy", - "AmazonEKS_CNI_Policy", - "AmazonEC2ContainerRegistryReadOnly", - "AmazonSSMManagedInstanceCore", - ]; - for policy in policies { - iam_client - .attach_role_policy() - .role_name("KarpenterInstanceNodeRole") - .policy_arn(format!("arn:aws:iam::aws:policy/{}", policy)) - .send() - .await - .context( - Resources::Clear, - format!( - "Unable to add policy {} to KarpenterInstanceNodeRole", - policy - ), - )?; - } - } - - info!("Creating instance profile: 'KarpenterInstanceNodeRole'"); - iam_client - .create_instance_profile() - .instance_profile_name("KarpenterInstanceNodeRole") - .send() - .await - .context(Resources::Clear, "Unable to create instance profile")?; - - iam_client - .add_role_to_instance_profile() - .instance_profile_name("KarpenterInstanceNodeRole") - .role_name("KarpenterInstanceNodeRole") - .send() - .await - .context(Resources::Clear, "Unable to add role to InstanceProfile")?; - - Ok(()) -} - -async fn create_controller_policy( - iam_client: &aws_sdk_iam::Client, - account_id: &str, -) -> ProviderResult<()> { - if iam_client - .get_policy() - .policy_arn(format!( - "arn:aws:iam::{}:policy/KarpenterControllerPolicy", - account_id - )) - .send() - .await - .is_ok() - { - info!("KarpenterControllerPolicy already exists"); - return Ok(()); - } - - info!("Creating controller policy"); - iam_client - .create_policy() - 
.policy_name("KarpenterControllerPolicy") - .policy_document( - r#"{ - "Statement": [ - { - "Action": [ - "ssm:GetParameter", - "iam:PassRole", - "ec2:DescribeImages", - "ec2:RunInstances", - "ec2:DescribeSubnets", - "ec2:DescribeSecurityGroups", - "ec2:DescribeLaunchTemplates", - "ec2:DescribeInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstanceTypeOfferings", - "ec2:DescribeAvailabilityZones", - "ec2:DeleteLaunchTemplate", - "ec2:CreateTags", - "ec2:CreateLaunchTemplate", - "ec2:CreateFleet", - "ec2:DescribeSpotPriceHistory", - "pricing:GetProducts" - ], - "Effect": "Allow", - "Resource": "*", - "Sid": "Karpenter" - }, - { - "Action": "ec2:TerminateInstances", - "Condition": { - "StringLike": { - "ec2:ResourceTag/Name": "*karpenter*" - } - }, - "Effect": "Allow", - "Resource": "*", - "Sid": "ConditionalEC2Termination" - } - ], - "Version": "2012-10-17" - }"#, - ) - .send() - .await - .context( - Resources::Clear, - "Unable to create Karpenter controller policy", - )?; - Ok(()) -} - /// This is the object that will destroy ec2 instances. pub struct Ec2KarpenterDestroyer {} @@ -910,7 +819,7 @@ impl Destroy for Ec2KarpenterDestroyer { })?; let resources = Resources::Remaining; - if !memo.tainted_nodegroup_exists { + if !memo.cloud_formation_stack_exists { return Ok(()); } @@ -942,134 +851,288 @@ impl Destroy for Ec2KarpenterDestroyer { ) .await .context(resources, "Error creating config")?; + if memo.tainted_nodegroup_exists { + let eks_client = aws_sdk_eks::Client::new(&shared_config); + + info!("Writing cluster's kubeconfig to {}", CLUSTER_KUBECONFIG); + let status = Command::new("eksctl") + .args([ + "utils", + "write-kubeconfig", + "-r", + &spec.configuration.region, + &format!("--cluster={}", &spec.configuration.cluster_name), + &format!("--kubeconfig={}", CLUSTER_KUBECONFIG), + ]) + .status() + .context(Resources::Remaining, "Failed write kubeconfig")?; - let eks_client = aws_sdk_eks::Client::new(&shared_config); - - info!("Writing cluster's kubeconfig to {}", CLUSTER_KUBECONFIG); - let status = Command::new("eksctl") - .args([ - "utils", - "write-kubeconfig", - "-r", - &spec.configuration.region, - &format!("--cluster={}", &spec.configuration.cluster_name), - &format!("--kubeconfig={}", CLUSTER_KUBECONFIG), - ]) - .status() - .context(Resources::Remaining, "Failed write kubeconfig")?; - - if !status.success() { - return Err(ProviderError::new_with_context( - Resources::Remaining, - format!("Failed write kubeconfig with status code {}", status), - )); - } - - info!("Removing taint from tainted nodegroup"); - eks_client - .update_nodegroup_config() - .cluster_name(&spec.configuration.cluster_name) - .nodegroup_name(TAINTED_NODEGROUP_NAME) - .taints( - UpdateTaintsPayload::builder() - .remove_taints( - Taint::builder() - .key("sonobuoy") - .value("ignore") - .effect(TaintEffect::NoSchedule) - .build(), - ) - .build(), - ) - .send() - .await - .context(resources, "Unable to apply taints")?; - - info!("Creating K8s Client from cluster kubeconfig"); - let kubeconfig = Kubeconfig::read_from(CLUSTER_KUBECONFIG) - .context(resources, "Unable to create config from cluster kubeconfig")?; - let k8s_client: Client = - Config::from_custom_kubeconfig(kubeconfig, &KubeConfigOptions::default()) - .await - .context(resources, "Unable to convert kubeconfig")? 
- .try_into() - .context( - resources, - "Unable to create k8s client from cluster kubeconfig", - )?; - - if memo.inflate_deployment_exists { - info!("Deleting inflate deployment"); - let deployment_api = Api::::namespaced(k8s_client.clone(), "default"); - deployment_api - .delete("inflate", &Default::default()) - .await - .context(resources, "Unable to delete deployment")?; - - let node_api = Api::::all(k8s_client); + if !status.success() { + return Err(ProviderError::new_with_context( + Resources::Remaining, + format!("Failed write kubeconfig with status code {}", status), + )); + } - info!("Waiting for karpenter nodes to be cleaned up"); + info!("Checking that tainted nodegroup is ready"); tokio::time::timeout( Duration::from_secs(600), - wait_for_nodes(&node_api, 2, Ordering::Equal), + wait_for_nodegroup( + &eks_client, + &spec.configuration.cluster_name, + TAINTED_NODEGROUP_NAME, + ), ) .await .context( resources, - "Timed out waiting for karpenter nodes to leave the cluster", + "Timed out waiting for tainted nodegroup to be `ACTIVE`", )??; - } - if memo.karpenter_namespace_exists { - info!("Uninstalling karpenter"); - let status = Command::new("helm") - .env("KUBECONFIG", CLUSTER_KUBECONFIG) - .args(["uninstall", "karpenter", "--namespace", "karpenter"]) - .status() - .context(Resources::Remaining, "Failed to create helm template")?; + info!("Removing taint from tainted nodegroup"); + eks_client + .update_nodegroup_config() + .cluster_name(&spec.configuration.cluster_name) + .nodegroup_name(TAINTED_NODEGROUP_NAME) + .taints( + UpdateTaintsPayload::builder() + .remove_taints( + Taint::builder() + .key("sonobuoy") + .value("ignore") + .effect(TaintEffect::NoSchedule) + .build(), + ) + .build(), + ) + .send() + .await + .context(resources, "Unable to apply taints")?; + + info!("Creating K8s Client from cluster kubeconfig"); + let kubeconfig = Kubeconfig::read_from(CLUSTER_KUBECONFIG) + .context(resources, "Unable to create config from cluster kubeconfig")?; + let k8s_client: Client = + Config::from_custom_kubeconfig(kubeconfig, &KubeConfigOptions::default()) + .await + .context(resources, "Unable to convert kubeconfig")? 
+ .try_into() + .context( + resources, + "Unable to create k8s client from cluster kubeconfig", + )?; + + if memo.inflate_deployment_exists { + info!("Deleting inflate deployment"); + let deployment_api = Api::::namespaced(k8s_client.clone(), "default"); + deployment_api + .delete("inflate", &Default::default()) + .await + .context(resources, "Unable to delete deployment")?; + + let node_api = Api::::all(k8s_client); + + info!("Waiting for karpenter nodes to be cleaned up"); + tokio::time::timeout( + Duration::from_secs(600), + wait_for_nodes(&node_api, 2, Ordering::Equal), + ) + .await + .context( + resources, + "Timed out waiting for karpenter nodes to leave the cluster", + )??; + } + + if memo.karpenter_namespace_exists { + info!("Uninstalling karpenter"); + let status = Command::new("helm") + .env("KUBECONFIG", CLUSTER_KUBECONFIG) + .args(["uninstall", "karpenter", "--namespace", "karpenter"]) + .status() + .context(Resources::Remaining, "Failed to create helm template")?; + + if !status.success() { + return Err(ProviderError::new_with_context( + Resources::Remaining, + format!( + "Failed to launch karpenter template with status code {}", + status + ), + )); + } + } + info!("Deleting tainted nodegroup"); + let status = Command::new("eksctl") + .args([ + "delete", + "nodegroup", + "-r", + &spec.configuration.region, + "--cluster", + spec.configuration.cluster_name.as_str(), + "--name", + TAINTED_NODEGROUP_NAME, + "--wait", + "--disable-eviction", + ]) + .status() + .context(resources, "Failed to delete nodegroup")?; if !status.success() { return Err(ProviderError::new_with_context( - Resources::Remaining, - format!( - "Failed to launch karpenter template with status code {}", - status - ), + resources, + format!("Failed to delete nodegroup with status code {}", status), )); } + + memo.current_status = "Instances deleted".into(); + client.send_info(memo.clone()).await.map_err(|e| { + ProviderError::new_with_source_and_context( + resources, + "Error sending final destruction message", + e, + ) + })?; + } + + // Remove the instance profile from the karpenter role + let iam_client = aws_sdk_iam::Client::new(&shared_config); + let instance_profile_out = iam_client + .list_instance_profiles_for_role() + .role_name(format!( + "KarpenterNodeRole-{}", + spec.configuration.cluster_name + )) + .send() + .await + .context(Resources::Remaining, "Unable to list instance profiles")?; + let instance_profile = instance_profile_out + .instance_profiles() + .and_then(|profiles| profiles.first()) + .and_then(|instance_profile| instance_profile.instance_profile_name().to_owned()); + + if let Some(instance_profile) = instance_profile { + iam_client + .remove_role_from_instance_profile() + .instance_profile_name(instance_profile) + .role_name(format!( + "KarpenterNodeRole-{}", + spec.configuration.cluster_name + )) + .send() + .await + .context( + Resources::Remaining, + "Unable to remove role from instance profile", + )?; } - info!("Deleting tainted nodegroup"); let status = Command::new("eksctl") .args([ "delete", - "nodegroup", + "iamserviceaccount", "-r", &spec.configuration.region, "--cluster", spec.configuration.cluster_name.as_str(), "--name", - TAINTED_NODEGROUP_NAME, + "karpenter", + "--namespace", + "karpenter", "--wait", ]) - .status() - .context(resources, "Failed to delete nodegroup")?; - if !status.success() { - return Err(ProviderError::new_with_context( - resources, - format!("Failed to delete nodegroup with status code {}", status), - )); + .status(); + if status.is_err() { + warn!("Unable to 
delete service account. It is possible it was already deleted.");
+        }
+
+        let status = iam_client
+            .delete_role()
+            .role_name(format!(
+                "KarpenterControllerRole-{}",
+                &spec.configuration.cluster_name
+            ))
+            .send()
+            .await;
+        if status.is_err() {
+            warn!("Unable to delete Karpenter controller role. It is possible it was already deleted.");
+        }
+
+        let stack_name = format!("Karpenter-{}", spec.configuration.cluster_name);
+        let cfn_client = aws_sdk_cloudformation::Client::new(&shared_config);
+
+        cfn_client
+            .delete_stack()
+            .stack_name(&stack_name)
+            .send()
+            .await
+            .context(
+                Resources::Remaining,
+                "Unable to delete cloudformation stack",
+            )?;
+
+        let _ = tokio::time::timeout(
+            Duration::from_secs(600),
+            wait_for_cloudformation_stack_deletion(stack_name, &cfn_client),
+        )
+        .await
+        .context(
+            Resources::Remaining,
+            "Timed out waiting for cloud formation stack to delete.",
+        )?;

         Ok(())
     }
 }
+
+async fn wait_for_cloudformation_stack(
+    stack_name: String,
+    desired_state: StackStatus,
+    cfn_client: &aws_sdk_cloudformation::Client,
+) -> ProviderResult<()> {
+    let mut state = StackStatus::CreateInProgress;
+    while state != desired_state {
+        info!(
+            "Waiting for cloudformation stack '{}' to reach '{:?}' state",
+            stack_name, desired_state
+        );
+        state = cfn_client
+            .describe_stacks()
+            .stack_name(&stack_name)
+            .send()
+            .await
+            .context(Resources::Remaining, "Unable to describe stack")?
+            .stacks
+            .and_then(|stacks| stacks.into_iter().next())
+            .and_then(|stack| stack.stack_status)
+            .unwrap_or(StackStatus::CreateInProgress);
+        tokio::time::sleep(Duration::from_secs(5)).await;
+    }
+    Ok(())
+}
+
+async fn wait_for_cloudformation_stack_deletion(
+    stack_name: String,
+    cfn_client: &aws_sdk_cloudformation::Client,
+) -> ProviderResult<()> {
+    loop {
+        info!(
+            "Waiting for cloudformation stack '{}' to be deleted",
+            stack_name
+        );
+        if cfn_client
+            .describe_stacks()
+            .stack_name(&stack_name)
+            .send()
+            .await
+            .context(Resources::Remaining, "Unable to describe stack")?
+            .stacks()
+            .map(|s| s.is_empty())
+            .unwrap_or_default()
+        {
+            return Ok(());
+        }
+        tokio::time::sleep(Duration::from_secs(5)).await;
+    }
+}
diff --git a/bottlerocket/types/src/agent_config.rs b/bottlerocket/types/src/agent_config.rs
index fc6e9142..aae28e62 100644
--- a/bottlerocket/types/src/agent_config.rs
+++ b/bottlerocket/types/src/agent_config.rs
@@ -327,6 +327,9 @@ pub struct Ec2KarpenterConfig {
     /// The cluster security group
     pub cluster_sg: String,
 
+    /// The version of karpenter to use
+    pub karpenter_version: Option<String>,
+
     /// The device mappings used for karpenter provisioning
     #[serde(default)]
     pub device_mappings: Vec,
diff --git a/controller/src/resource_controller/action.rs b/controller/src/resource_controller/action.rs
index cf0a0dce..841b5e2f 100644
--- a/controller/src/resource_controller/action.rs
+++ b/controller/src/resource_controller/action.rs
@@ -171,7 +171,7 @@ async fn creation_not_done_action(
     if !is_task_state_running && !r.resource().has_finalizer(FINALIZER_CREATION_JOB) {
         return Ok(CreationAction::AddJobFinalizer);
     }
-    if !is_task_state_running && !r.resource().has_finalizer(FINALIZER_CLEANUP_REQUIRED) {
+    if !r.resource().has_finalizer(FINALIZER_CLEANUP_REQUIRED) {
         return Ok(CreationAction::AddCleanupFinalizer);
     }
     let job_state = r.get_job_state(ResourceAction::Create).await?;
@@ -296,6 +296,9 @@ async fn destruction_action_with_resources(r: &ResourceInterface) -> Result