diff --git a/pkg/cli/add_vcluster_helm.go b/pkg/cli/add_vcluster_helm.go
index 5c760bc7d..55306139d 100644
--- a/pkg/cli/add_vcluster_helm.go
+++ b/pkg/cli/add_vcluster_helm.go
@@ -144,6 +144,7 @@ func addVClusterHelm(
 		globalFlags.LoadedConfig(log),
 		kubeClient,
 		options.ImportName,
+		vCluster.Name,
 		vCluster.Namespace,
 		options.Project,
 		options.AccessKey,
diff --git a/pkg/cli/config/types.go b/pkg/cli/config/types.go
index 0968c77b4..ffabf7ec6 100644
--- a/pkg/cli/config/types.go
+++ b/pkg/cli/config/types.go
@@ -32,8 +32,6 @@ type Platform struct {
 	LastInstallContext string `json:"lastInstallContext,omitempty"`
 	// AccessKey is the access key for the given loft host
 	AccessKey string `json:"accesskey,omitempty"`
-	// VirtualClusterAccessKey is the access key for the given loft host to create virtual clusters
-	VirtualClusterAccessKey string `json:"virtualClusterAccessKey,omitempty"`
 	// Insecure specifies if the loft instance is insecure
 	Insecure bool `json:"insecure,omitempty"`
 	// CertificateAuthorityData is passed as certificate-authority-data to the platform config
diff --git a/pkg/cli/create_helm.go b/pkg/cli/create_helm.go
index 2b6271994..d6832b6a4 100644
--- a/pkg/cli/create_helm.go
+++ b/pkg/cli/create_helm.go
@@ -301,7 +301,7 @@ func CreateHelm(ctx context.Context, options *CreateOptions, globalFlags *flags.
 
 	// create platform secret
 	if cmd.Add {
-		err = cmd.addVCluster(ctx, vClusterConfig)
+		err = cmd.addVCluster(ctx, vClusterName, vClusterConfig)
 		if err != nil {
 			return err
 		}
@@ -372,11 +372,11 @@ func (cmd *createHelm) parseVClusterYAML(chartValues string) (*config.Config, er
 	return vClusterConfig, nil
 }
 
-func (cmd *createHelm) addVCluster(ctx context.Context, vClusterConfig *config.Config) error {
+func (cmd *createHelm) addVCluster(ctx context.Context, name string, vClusterConfig *config.Config) error {
 	platformConfig, err := vClusterConfig.GetPlatformConfig()
 	if err != nil {
 		return fmt.Errorf("get platform config: %w", err)
-	} else if platformConfig.APIKey.SecretName != "" {
+	} else if platformConfig.APIKey.SecretName != "" || platformConfig.APIKey.Namespace != "" {
 		return nil
 	}
 
@@ -390,7 +390,7 @@ func (cmd *createHelm) addVCluster(ctx context.Context, vClusterConfig *config.C
 		return nil
 	}
 
-	err = platform.ApplyPlatformSecret(ctx, cmd.LoadedConfig(cmd.log), cmd.kubeClient, "", cmd.Namespace, cmd.Project, "", "", false, cmd.LoadedConfig(cmd.log).Platform.CertificateAuthorityData)
+	err = platform.ApplyPlatformSecret(ctx, cmd.LoadedConfig(cmd.log), cmd.kubeClient, "", name, cmd.Namespace, cmd.Project, "", "", false, cmd.LoadedConfig(cmd.log).Platform.CertificateAuthorityData)
 	if err != nil {
 		return fmt.Errorf("apply platform secret: %w", err)
 	}
diff --git a/pkg/k8s/k8s.go b/pkg/k8s/k8s.go
index 001f03f64..3cdc34742 100644
--- a/pkg/k8s/k8s.go
+++ b/pkg/k8s/k8s.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"os"
 	"os/exec"
 	"strings"
 	"time"
@@ -243,6 +244,24 @@ func RunCommand(ctx context.Context, command []string, component string) error {
 	cmd := exec.CommandContext(ctx, command[0], command[1:]...)
 	cmd.Stdout = writer.Writer()
 	cmd.Stderr = writer.Writer()
+	cmd.Cancel = func() error {
+		err := cmd.Process.Signal(os.Interrupt)
+		if err != nil {
+			return fmt.Errorf("signal %s: %w", command[0], err)
+		}
+
+		state, err := cmd.Process.Wait()
+		if err == nil && state.Pid() > 0 {
+			time.Sleep(2 * time.Second)
+		}
+
+		err = cmd.Process.Kill()
+		if err != nil {
+			return fmt.Errorf("kill %s: %w", command[0], err)
+		}
+
+		return nil
+	}
 	err = cmd.Run()
 
 	// make sure we wait for scanner to be done
diff --git a/pkg/platform/secret.go b/pkg/platform/secret.go
index bddd6de08..12a84269d 100644
--- a/pkg/platform/secret.go
+++ b/pkg/platform/secret.go
@@ -7,11 +7,13 @@ import (
 	"reflect"
 	"strconv"
 	"strings"
-	"time"
 
 	managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1"
 	storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1"
 	"github.com/loft-sh/vcluster/pkg/cli/config"
+	"github.com/loft-sh/vcluster/pkg/platform/kube"
+	"github.com/loft-sh/vcluster/pkg/projectutil"
+	"github.com/loft-sh/vcluster/pkg/util/random"
 	corev1 "k8s.io/api/core/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -21,11 +23,14 @@ import (
 
 const DefaultPlatformSecretName = "vcluster-platform-api-key"
 
+const CreatedByCLILabel = "vcluster.loft.sh/created-by-cli"
+
 func ApplyPlatformSecret(
 	ctx context.Context,
 	config *config.CLI,
 	kubeClient kubernetes.Interface,
 	importName,
+	name,
 	namespace,
 	project,
 	accessKey,
@@ -33,24 +38,41 @@ func ApplyPlatformSecret(
 	insecure bool,
 	certificateAuthorityData []byte,
 ) error {
-	var err error
-	accessKey, host, insecure, err = getAccessKeyAndHost(ctx, config, accessKey, host, insecure)
+	// init platform client
+	platformClient, err := InitClientFromConfig(ctx, config)
 	if err != nil {
-		return fmt.Errorf("get access key and host: %w", err)
+		return err
+	}
+
+	// set host
+	if host == "" {
+		host = strings.TrimPrefix(platformClient.Config().Platform.Host, "https://")
+	}
+	if !insecure {
+		insecure = platformClient.Config().Platform.Insecure
+	}
+	if project == "" {
+		project = "default"
+	}
+
+	// get access key
+	if accessKey == "" {
+		accessKey, importName, err = getAccessKey(ctx, kubeClient, platformClient, importName, name, namespace, project)
+		if err != nil {
+			return fmt.Errorf("get access key: %w", err)
+		}
 	}
 
 	// build secret payload
-	payload := map[string][]byte{
+	secretPayload := map[string][]byte{
 		"accessKey":                []byte(accessKey),
 		"host":                     []byte(strings.TrimPrefix(host, "https://")),
+		"project":                  []byte(project),
 		"insecure":                 []byte(strconv.FormatBool(insecure)),
 		"certificateAuthorityData": certificateAuthorityData,
 	}
-	if project != "" {
-		payload["project"] = []byte(project)
-	}
 	if importName != "" {
-		payload["name"] = []byte(importName)
+		secretPayload["name"] = []byte(importName)
 	}
 
 	// check if secret already exists
@@ -62,15 +84,18 @@ func ApplyPlatformSecret(
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      DefaultPlatformSecretName,
 				Namespace: namespace,
+				Labels: map[string]string{
+					CreatedByCLILabel: "true",
+				},
 			},
-			Data: payload,
+			Data: secretPayload,
 		}, metav1.CreateOptions{})
 		if err != nil {
 			return fmt.Errorf("error creating platform secret %s/%s: %w", namespace, DefaultPlatformSecretName, err)
 		}
 
 		return nil
-	} else if keySecret != nil && reflect.DeepEqual(keySecret.Data, payload) {
+	} else if keySecret != nil && reflect.DeepEqual(keySecret.Data, secretPayload) {
 		// no update needed, just return
 		return nil
 	}
@@ -81,7 +106,7 @@ func ApplyPlatformSecret(
 	// create the patch
 	patch := ctrlclient.MergeFrom(keySecret.DeepCopy())
-	keySecret.Data = payload
+	keySecret.Data = secretPayload
 	patchBytes, err := patch.Data(keySecret)
 	if err != nil {
 		return fmt.Errorf("error creating patch for platform secret %s/%s: %w", namespace, DefaultPlatformSecretName, err)
 	}
@@ -96,84 +121,121 @@ func ApplyPlatformSecret(
 	return nil
 }
 
-func getAccessKeyAndHost(ctx context.Context, config *config.CLI, accessKey, host string, insecure bool) (string, string, bool, error) {
-	if host != "" && accessKey != "" {
-		return accessKey, host, insecure, nil
-	}
-
-	platformClient, err := InitClientFromConfig(ctx, config)
+func getAccessKey(ctx context.Context, kubeClient kubernetes.Interface, platformClient Client, importName, name, namespace, project string) (string, string, error) {
+	// get management client
+	managementClient, err := platformClient.Management()
 	if err != nil {
-		return "", "", false, err
+		return "", "", fmt.Errorf("error getting management client: %w", err)
 	}
-	if host == "" {
-		host = strings.TrimPrefix(platformClient.Config().Platform.Host, "https://")
+
+	// get service and then search virtual cluster instance with service uid
+	service, err := kubeClient.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
+	if err != nil && !kerrors.IsNotFound(err) {
+		return "", "", fmt.Errorf("could not get service %s/%s: %v", namespace, name, err)
+	} else if err == nil {
+		serviceUID := string(service.UID)
+
+		// find existing vCluster
+		virtualClusterList, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(projectutil.ProjectNamespace(project)).List(ctx, metav1.ListOptions{})
+		if err != nil {
+			return "", "", fmt.Errorf("could not list virtual cluster instances in project %s: %v", project, err)
+		}
+
+		// try to find vCluster
+		var virtualClusterInstance *managementv1.VirtualClusterInstance
+		for _, vci := range virtualClusterList.Items {
+			if vci.Status.ServiceUID == serviceUID {
+				virtualClusterInstance = &vci
+				break
+			}
+		}
+
+		// get access key for existing instance
+		if virtualClusterInstance != nil {
+			return returnAccessKeyFromInstance(ctx, managementClient, virtualClusterInstance)
+		}
 	}
-	if !insecure {
-		insecure = platformClient.Config().Platform.Insecure
+
+	// we need to create a new instance here
+	vName := importName
+	if vName == "" {
+		vName = name
 	}
 
-	managementClient, err := platformClient.Management()
+	// try with the regular name first
+	created, accessKey, createdName, err := createWithName(ctx, managementClient, project, vName)
 	if err != nil {
-		return "", "", false, fmt.Errorf("create management client: %w", err)
+		return "", "", fmt.Errorf("error creating platform secret %s/%s: %w", namespace, DefaultPlatformSecretName, err)
+	} else if created {
+		return accessKey, createdName, nil
+	} else if importName != "" {
+		return "", "", fmt.Errorf("virtual cluster instance with name %s already exists", importName)
	}
 
-	// check if we need to find access key
-	if accessKey != "" {
-		return accessKey, host, insecure, nil
+	// try with random name
+	vName += "-" + random.String(5)
+	created, accessKey, createdName, err = createWithName(ctx, managementClient, project, vName)
+	if err != nil {
+		return "", "", fmt.Errorf("error creating platform secret %s/%s: %w", namespace, DefaultPlatformSecretName, err)
+	} else if !created {
+		return "", "", fmt.Errorf("couldn't create virtual cluster instance, name %s already exists", vName)
 	}
 
-	// is the access key still valid?
-	platformConfig := platformClient.Config().Platform
-	if platformConfig.VirtualClusterAccessKey != "" {
-		selfCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
-		self, err := managementClient.Loft().ManagementV1().Selves().Create(selfCtx, &managementv1.Self{
-			Spec: managementv1.SelfSpec{
-				AccessKey: platformConfig.VirtualClusterAccessKey,
-			},
-		}, metav1.CreateOptions{})
-		cancel()
-		if err != nil || self.Status.Subject != platformClient.Self().Status.Subject {
-			platformConfig.VirtualClusterAccessKey = ""
-		}
-	}
+	return accessKey, createdName, nil
+}
 
-	// check if we need to create a virtual cluster access key
-	if platformConfig.VirtualClusterAccessKey == "" {
-		user := ""
-		team := ""
-		if platformClient.Self().Status.User != nil {
-			user = platformClient.Self().Status.User.Name
-		}
-		if platformClient.Self().Status.Team != nil {
-			team = platformClient.Self().Status.Team.Name
+func createWithName(ctx context.Context, managementClient kube.Interface, project string, name string) (bool, string, string, error) {
+	namespace := projectutil.ProjectNamespace(project)
+	virtualClusterInstance, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(namespace).Get(ctx, name, metav1.GetOptions{})
+	if err != nil && !kerrors.IsNotFound(err) {
+		return false, "", "", fmt.Errorf("could not get virtual cluster instance %s/%s: %w", project, name, err)
+	} else if err == nil {
+		// instance has no service uid yet
+		if virtualClusterInstance.Spec.External && virtualClusterInstance.Status.ServiceUID == "" {
+			accessKey, createdName, err := returnAccessKeyFromInstance(ctx, managementClient, virtualClusterInstance)
+			return true, accessKey, createdName, err
 		}
 
-		accessKey, err := managementClient.Loft().ManagementV1().OwnedAccessKeys().Create(ctx, &managementv1.OwnedAccessKey{
-			Spec: managementv1.OwnedAccessKeySpec{
-				AccessKeySpec: storagev1.AccessKeySpec{
-					DisplayName: "vCluster CLI Activation Key",
-					User:        user,
-					Team:        team,
-					Scope: &storagev1.AccessKeyScope{
-						Roles: []storagev1.AccessKeyScopeRole{
-							{
-								Role: storagev1.AccessKeyScopeRoleVCluster,
+		return false, "", "", nil
+	}
+
+	virtualClusterInstance, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(namespace).Create(ctx, &managementv1.VirtualClusterInstance{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels: map[string]string{
+				CreatedByCLILabel: "true",
+			},
+		},
+		Spec: managementv1.VirtualClusterInstanceSpec{
+			VirtualClusterInstanceSpec: storagev1.VirtualClusterInstanceSpec{
+				Template: &storagev1.VirtualClusterTemplateDefinition{
+					VirtualClusterCommonSpec: storagev1.VirtualClusterCommonSpec{
+						HelmRelease: storagev1.VirtualClusterHelmRelease{
+							Chart: storagev1.VirtualClusterHelmChart{
+								Version: "0.0.0",
 							},
 						},
 					},
 				},
+				External:    true,
+				NetworkPeer: true,
 			},
-		}, metav1.CreateOptions{})
-		if err != nil {
-			return "", "", false, fmt.Errorf("create owned access key: %w", err)
-		}
+		},
+	}, metav1.CreateOptions{})
+	if err != nil {
+		return false, "", "", fmt.Errorf("create virtual cluster instance: %w", err)
+	}
 
-		platformConfig.VirtualClusterAccessKey = accessKey.Spec.Key
-		platformClient.Config().Platform = platformConfig
-		if err := platformClient.Save(); err != nil {
-			return "", "", false, fmt.Errorf("save vCluster platform config: %w", err)
-		}
+	accessKey, createdName, err := returnAccessKeyFromInstance(ctx, managementClient, virtualClusterInstance)
+	return true, accessKey, createdName, err
+}
+
+func returnAccessKeyFromInstance(ctx context.Context, managementClient kube.Interface, virtualClusterInstance *managementv1.VirtualClusterInstance) (string, string, error) {
+	accessKey, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(virtualClusterInstance.Namespace).GetAccessKey(ctx, virtualClusterInstance.Name, metav1.GetOptions{})
+	if err != nil {
+		return "", "", fmt.Errorf("get access key for virtual cluster instance %s/%s: %w", virtualClusterInstance.Namespace, virtualClusterInstance.Name, err)
 	}
 
-	return platformConfig.VirtualClusterAccessKey, host, insecure, nil
+	return accessKey.AccessKey, virtualClusterInstance.Name, nil
 }
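Note (not part of the diff): a minimal sketch of how a caller could wire up the extended `ApplyPlatformSecret` signature, which now takes the vCluster name so the platform can match or create a `VirtualClusterInstance` by service UID. The kubeconfig loading path, the `applyPlatformSecretExample` helper, and the literal values (`my-vcluster`, `vcluster-my-vcluster`) are illustrative assumptions, not code from this change.

```go
package example

import (
	"context"

	"github.com/loft-sh/vcluster/pkg/cli/config"
	"github.com/loft-sh/vcluster/pkg/platform"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// applyPlatformSecretExample is a hypothetical caller: it builds a kube client
// from the local kubeconfig and applies the platform secret for one vCluster,
// passing the new name argument introduced in this diff.
func applyPlatformSecretExample(ctx context.Context, cliConfig *config.CLI) error {
	// assumption: load the default kubeconfig much like the CLI would
	restConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		clientcmd.NewDefaultClientConfigLoadingRules(),
		&clientcmd.ConfigOverrides{},
	).ClientConfig()
	if err != nil {
		return err
	}
	kubeClient, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		return err
	}

	return platform.ApplyPlatformSecret(ctx,
		cliConfig,
		kubeClient,
		"",                     // importName: empty, so the vCluster name below is used
		"my-vcluster",          // name: the vCluster whose service UID is looked up (new parameter)
		"vcluster-my-vcluster", // namespace the vCluster and its secret live in
		"default",              // project: also the fallback when left empty
		"",                     // accessKey: resolved via the platform when empty
		"",                     // host: falls back to the configured platform host
		false,                  // insecure
		nil,                    // certificateAuthorityData
	)
}
```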