diff --git a/charts/k3s/templates/statefulset.yaml b/charts/k3s/templates/statefulset.yaml index 4500f5efc..641fca82f 100644 --- a/charts/k3s/templates/statefulset.yaml +++ b/charts/k3s/templates/statefulset.yaml @@ -109,6 +109,8 @@ spec: volumes: - name: helm-cache emptyDir: {} + - name: k3s-binary + emptyDir: {} - name: tmp emptyDir: {} - name: config @@ -149,8 +151,8 @@ spec: {{ toYaml . | indent 8 }} {{- end }} {{- end }} - containers: {{- if and (not .Values.vcluster.disabled) (not .Values.noopSyncer.enabled) }} + initContainers: - image: {{ .Values.defaultImageRegistry }}{{ .Values.vcluster.image }} name: vcluster # k3s has a problem running as pid 1 and disabled agents on cgroupv2 @@ -160,70 +162,19 @@ spec: - /bin/sh args: - -c - - {{ range $f := .Values.vcluster.command -}} - {{ $f }} - {{- end }} - {{- range $f := .Values.vcluster.baseArgs }} - {{ $f }} - {{- end }} - {{- if not .Values.sync.nodes.enableScheduler }} - --disable-scheduler - --kube-controller-manager-arg=controllers=*,-nodeipam,-nodelifecycle,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle,-ttl - --kube-apiserver-arg=endpoint-reconciler-type=none - {{- else }} - --kube-controller-manager-arg=controllers=*,-nodeipam,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle,-ttl - --kube-apiserver-arg=endpoint-reconciler-type=none - --kube-controller-manager-arg=node-monitor-grace-period=1h - --kube-controller-manager-arg=node-monitor-period=1h - {{- end }} - {{- if .Values.serviceCIDR }} - --service-cidr={{ .Values.serviceCIDR }} - {{- else }} - --service-cidr=$(SERVICE_CIDR) - {{- end }} - {{- if .Values.pro }} - {{- if .Values.embeddedEtcd.enabled }} - --datastore-endpoint=https://localhost:2379 - --datastore-cafile=/data/pki/etcd/ca.crt - --datastore-certfile=/data/pki/apiserver-etcd-client.crt - --datastore-keyfile=/data/pki/apiserver-etcd-client.key - {{- end }} - {{- end }} - {{- range $f := 
.Values.vcluster.extraArgs }} - {{ $f }} - {{- end }} - && true - env: - {{- if .Values.vcluster.env }} -{{ toYaml .Values.vcluster.env | indent 10 }} - {{- end }} - - name: K3S_TOKEN - valueFrom: - secretKeyRef: - name: "vc-k3s-{{ .Release.Name }}" - key: "token" - {{- if not .Values.serviceCIDR }} - - name: SERVICE_CIDR - valueFrom: - configMapKeyRef: - name: "vc-cidr-{{ .Release.Name }}" - key: cidr - {{- end }} + - "cp /bin/k3s /k3s-binary/k3s" {{- if .Values.vcluster.imagePullPolicy }} imagePullPolicy: {{ .Values.vcluster.imagePullPolicy }} {{- end }} securityContext: {{ toYaml .Values.securityContext | indent 10 }} volumeMounts: - - name: config - mountPath: /etc/rancher -{{ toYaml .Values.vcluster.volumeMounts | indent 10 }} - {{- if .Values.vcluster.extraVolumeMounts }} -{{ toYaml .Values.vcluster.extraVolumeMounts | indent 10 }} - {{- end }} + - name: k3s-binary + mountPath: /k3s-binary resources: {{ toYaml .Values.vcluster.resources | indent 10 }} {{- end }} + containers: {{- if not .Values.syncer.disabled }} - name: syncer {{- if .Values.syncer.image }} @@ -352,6 +303,9 @@ spec: securityContext: {{ toYaml .Values.securityContext | indent 10 }} env: + {{- if .Values.vcluster.env }} +{{ toYaml .Values.vcluster.env | indent 10 }} + {{- end }} {{- if .Values.syncer.env }} {{ toYaml .Values.syncer.env | indent 10 }} {{- end }} @@ -363,6 +317,37 @@ spec: valueFrom: fieldRef: fieldPath: status.podIP + - name: VCLUSTER_COMMAND + value: |- + command: + {{ range $f := .Values.vcluster.command -}} + - {{ $f }} + {{- end }} + args: + {{- range $f := .Values.vcluster.baseArgs }} + - {{ $f }} + {{- end }} + {{- if not .Values.sync.nodes.enableScheduler }} + - --disable-scheduler + - --kube-controller-manager-arg=controllers=*,-nodeipam,-nodelifecycle,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle,-ttl + - --kube-apiserver-arg=endpoint-reconciler-type=none + {{- else }} + - 
--kube-controller-manager-arg=controllers=*,-nodeipam,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle,-ttl + - --kube-apiserver-arg=endpoint-reconciler-type=none + - --kube-controller-manager-arg=node-monitor-grace-period=1h + - --kube-controller-manager-arg=node-monitor-period=1h + {{- end }} + {{- if .Values.pro }} + {{- if .Values.embeddedEtcd.enabled }} + - --datastore-endpoint=https://localhost:2379 + - --datastore-cafile=/data/pki/etcd/ca.crt + - --datastore-certfile=/data/pki/apiserver-etcd-client.crt + - --datastore-keyfile=/data/pki/apiserver-etcd-client.key + {{- end }} + {{- end }} + {{- range $f := .Values.vcluster.extraArgs }} + - {{ $f }} + {{- end }} {{- if eq (.Values.replicas | toString | atoi) 1 }} - name: VCLUSTER_NODE_NAME valueFrom: @@ -377,8 +362,12 @@ spec: - name: VCLUSTER_TELEMETRY_CONFIG value: {{ .Values.telemetry | toJson | quote }} volumeMounts: + - name: k3s-binary + mountPath: /k3s-binary - name: helm-cache mountPath: /.cache/helm + - name: config + mountPath: /etc/rancher - name: tmp mountPath: /tmp {{- if or .Values.coredns.enabled .Values.coredns.integrated }} @@ -409,6 +398,12 @@ spec: {{ toYaml .Values.syncer.volumeMounts | indent 10 }} {{- if .Values.syncer.extraVolumeMounts }} {{ toYaml .Values.syncer.extraVolumeMounts | indent 10 }} + {{- end }} + {{- if .Values.vcluster.volumeMounts }} +{{ toYaml .Values.vcluster.volumeMounts | indent 10 }} + {{- end }} + {{- if .Values.vcluster.extraVolumeMounts }} +{{ toYaml .Values.vcluster.extraVolumeMounts | indent 10 }} {{- end }} resources: {{ toYaml .Values.syncer.resources | indent 10 }} diff --git a/charts/k3s/values.yaml b/charts/k3s/values.yaml index 8cfd73705..f9726357f 100644 --- a/charts/k3s/values.yaml +++ b/charts/k3s/values.yaml @@ -150,13 +150,10 @@ syncer: extraVolumeMounts: [] resources: limits: - cpu: 1000m - memory: 512Mi + memory: 2Gi requests: - # ensure that cpu/memory requests are high enough. 
- # for example gke wants minimum 10m/32Mi here! - cpu: 20m - memory: 64Mi + cpu: 200m + memory: 256Mi kubeConfigContextName: "my-vcluster" serviceAnnotations: {} @@ -166,7 +163,7 @@ vcluster: image: rancher/k3s:v1.28.2-k3s1 imagePullPolicy: "" command: - - /bin/k3s + - /k3s-binary/k3s baseArgs: - server - --write-kubeconfig=/data/k3s-config/kube-config.yaml @@ -178,17 +175,19 @@ vcluster: - --flannel-backend=none - --kube-apiserver-arg=bind-address=127.0.0.1 extraArgs: [] + # Deprecated: Use syncer.extraVolumeMounts instead extraVolumeMounts: [] - volumeMounts: - - mountPath: /data - name: data + # Deprecated: Use syncer.volumeMounts instead + volumeMounts: [] + # Deprecated: Use syncer.env instead env: [] resources: limits: - memory: 2Gi + cpu: 100m + memory: 128Mi requests: - cpu: 200m - memory: 256Mi + cpu: 20m + memory: 64Mi # Embedded etcd settings embeddedEtcd: diff --git a/devspace.yaml b/devspace.yaml index 241d8a310..98d95b661 100644 --- a/devspace.yaml +++ b/devspace.yaml @@ -26,6 +26,7 @@ deployments: chart: name: ./charts/k8s values: &values + autoDeletePersistentVolumeClaims: false k3sToken: "devspace" job: enabled: false diff --git a/pkg/k3s/k3s.go b/pkg/k3s/k3s.go index 76c2b0e85..30a2b6e0a 100644 --- a/pkg/k3s/k3s.go +++ b/pkg/k3s/k3s.go @@ -4,24 +4,94 @@ import ( "context" "fmt" "os" + "os/exec" + "strings" + "github.com/ghodss/yaml" + "github.com/loft-sh/log/scanner" "github.com/loft-sh/vcluster/pkg/util/random" corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" ) var tokenPath = "/data/server/token" -func EnsureK3SToken(ctx context.Context, currentNamespaceClient kubernetes.Interface, currentNamespace, vClusterName string) error { +const VClusterCommandEnv = "VCLUSTER_COMMAND" + +type k3sCommand struct { + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` +} + +func StartK3S(ctx context.Context, 
serviceCIDR, k3sToken string) error { + reader, writer, err := os.Pipe() + if err != nil { + return err + } + defer writer.Close() + + command := &k3sCommand{} + err = yaml.Unmarshal([]byte(os.Getenv(VClusterCommandEnv)), command) + if err != nil { + return fmt.Errorf("parsing k3s command %s: %w", os.Getenv(VClusterCommandEnv), err) + } + + // add service cidr and k3s token + command.Args = append( + command.Args, + "--service-cidr", serviceCIDR, + "--token", k3sToken, + ) + args := append(command.Command, command.Args...) + + // start func + done := make(chan struct{}) + go func() { + defer close(done) + + // make sure we scan the output correctly + scan := scanner.NewScanner(reader) + for scan.Scan() { + line := scan.Text() + if len(line) == 0 { + continue + } + + // print to our logs + args := []interface{}{"component", "k3s"} + PrintK3sLine(line, args) + } + }() + + // start the command + klog.InfoS("Starting k3s", "args", strings.Join(args, " ")) + cmd := exec.CommandContext(ctx, args[0], args[1:]...) 
+ cmd.Stdout = writer + cmd.Stderr = writer + err = cmd.Run() + + // make sure we wait for scanner to be done + _ = writer.Close() + <-done + + // regular stop case + if err != nil && err.Error() != "signal: killed" { + return err + } + return nil +} + +func EnsureK3SToken(ctx context.Context, currentNamespaceClient kubernetes.Interface, currentNamespace, vClusterName string) (string, error) { // check if secret exists secretName := fmt.Sprintf("vc-k3s-%s", vClusterName) - _, err := currentNamespaceClient.CoreV1().Secrets(currentNamespace).Get(ctx, secretName, metav1.GetOptions{}) + secret, err := currentNamespaceClient.CoreV1().Secrets(currentNamespace).Get(ctx, secretName, metav1.GetOptions{}) if err != nil && !kerrors.IsNotFound(err) { - return err + return "", err } else if err == nil { - return nil + return string(secret.Data["token"]), nil } // try to read token file (migration case) @@ -31,7 +101,7 @@ func EnsureK3SToken(ctx context.Context, currentNamespaceClient kubernetes.Inter } // create k3s secret - _, err = currentNamespaceClient.CoreV1().Secrets(currentNamespace).Create(ctx, &corev1.Secret{ + secret, err = currentNamespaceClient.CoreV1().Secrets(currentNamespace).Create(ctx, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, Namespace: currentNamespace, @@ -42,8 +112,14 @@ func EnsureK3SToken(ctx context.Context, currentNamespaceClient kubernetes.Inter Type: corev1.SecretTypeOpaque, }, metav1.CreateOptions{}) if err != nil && !kerrors.IsAlreadyExists(err) { - return err + return "", err + } else if err != nil { + // retrieve k3s secret again + secret, err = currentNamespaceClient.CoreV1().Secrets(currentNamespace).Get(ctx, secretName, metav1.GetOptions{}) + if err != nil { + return "", err + } } - return nil + return string(secret.Data["token"]), nil } diff --git a/pkg/k3s/parse.go b/pkg/k3s/parse.go new file mode 100644 index 000000000..f9ee90947 --- /dev/null +++ b/pkg/k3s/parse.go @@ -0,0 +1,109 @@ +package k3s + +import ( + "regexp" 
	"strings" + + "k8s.io/klog/v2" +) + +var klogRegEx1 = regexp.MustCompile(`^[A-Z][0-9]{4} [0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{6}\s+[0-9]+\s([^]]+)] (.+)$`) + +var structuredComponent = regexp.MustCompile(`^([a-zA-Z\-_]+)=`) + +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md +func PrintK3sLine(line string, args []interface{}) { + if klogRegEx1.MatchString(line) { + matches := klogRegEx1.FindStringSubmatch(line) + args = append(args, "location", matches[1]) + line = matches[2] + } + + // try to parse structured logging + line, extraArgs := parseStructuredLogging(line) + klog.InfoS(line, append(args, extraArgs...)...) +} + +func parseStructuredLogging(line string) (string, []interface{}) { + if len(line) == 0 { + return line, nil + } + + line = strings.TrimSpace(line) + + // parse message + message, line := parseQuotedMessage(line, true) + if line == "" && structuredComponent.MatchString(message) { + line = message + message = "" + } + + // parse structured + retArgs := []interface{}{} + for line != "" { + if !structuredComponent.MatchString(line) { + break + } + + matches := structuredComponent.FindStringSubmatch(line) + name := matches[1] + line = line[len(matches[1])+1:] + if message == "" && name == "msg" { + value, restOfLine := parseQuotedMessage(line, false) + + message = value + line = restOfLine + } else { + retArgs = append(retArgs, name) + value, restOfLine := parseQuotedMessage(line, false) + retArgs = append(retArgs, value) + + line = restOfLine + } + } + + return message, retArgs +} + +func parseQuotedMessage(line string, allowSpace bool) (string, string) { + message := "" + if strings.HasPrefix(line, `"`) { + message = line[1:] + if strings.HasPrefix(message, `"`) { + message = "" + } else { + // find the next non \" + baseIndex := 0 + for { + nextIndex := strings.Index(message[baseIndex:], `"`) + nextIndex += baseIndex + + // unclosed " (strings.Index returned -1 before the baseIndex offset was added) + if nextIndex < baseIndex { + return line, "" + } else if 
nextIndex > 0 && message[nextIndex-1] != '\\' { + message = message[:nextIndex] + break + } + + baseIndex = nextIndex + 1 + if baseIndex >= len(message) { + return line, "" + } + } + } + + line = strings.TrimSpace(line[len(message)+2:]) + } else { + if allowSpace { + return strings.ReplaceAll(line, `\"`, `"`), "" + } + + nextSpace := strings.Index(line, ` `) + if nextSpace > 0 { + return strings.ReplaceAll(line[:nextSpace], `\"`, `"`), line[nextSpace+1:] + } + } + + return strings.ReplaceAll(message, `\"`, `"`), line +} diff --git a/pkg/setup/initialize.go b/pkg/setup/initialize.go index cb778611d..ce291a8f3 100644 --- a/pkg/setup/initialize.go +++ b/pkg/setup/initialize.go @@ -34,8 +34,9 @@ func Initialize( } // Ensure that service CIDR range is written into the expected location - err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 2*time.Minute, true, func(waitCtx context.Context) (bool, error) { err := initialize( + waitCtx, ctx, workspaceNamespaceClient, currentNamespaceClient, @@ -62,6 +63,7 @@ func Initialize( // initialize creates the required secrets and configmaps for the control plane to start func initialize( ctx context.Context, + parentCtx context.Context, workspaceNamespaceClient, currentNamespaceClient kubernetes.Interface, workspaceNamespace, @@ -104,10 +106,20 @@ func initialize( // check if k3s if !isK0s && certificatesDir != "/pki" { // its k3s, let's create the token secret - err = k3s.EnsureK3SToken(ctx, currentNamespaceClient, currentNamespace, vClusterName) + k3sToken, err := k3s.EnsureK3SToken(ctx, currentNamespaceClient, currentNamespace, vClusterName) if err != nil { return err } + + // start k3s + go func() { + // we need to run this with the parent ctx as otherwise this context will be cancelled by the wait + // loop in Initialize + err := k3s.StartK3S(parentCtx, serviceCIDR, k3sToken) + if err != nil { + 
klog.Fatalf("Error running k3s: %v", err) + } + }() } return nil