refactor: delete k3s container and move to init container
FabianKramm committed Oct 30, 2023
1 parent 817403e commit 0eacaab
Showing 6 changed files with 270 additions and 78 deletions.
107 changes: 51 additions & 56 deletions charts/k3s/templates/statefulset.yaml
@@ -109,6 +109,8 @@ spec:
volumes:
- name: helm-cache
emptyDir: {}
- name: k3s-binary
emptyDir: {}
- name: tmp
emptyDir: {}
- name: config
@@ -149,8 +151,8 @@ spec:
{{ toYaml . | indent 8 }}
{{- end }}
{{- end }}
containers:
{{- if and (not .Values.vcluster.disabled) (not .Values.noopSyncer.enabled) }}
initContainers:
- image: {{ .Values.defaultImageRegistry }}{{ .Values.vcluster.image }}
name: vcluster
# k3s has a problem running as pid 1 and disabled agents on cgroupv2
@@ -160,70 +162,19 @@ spec:
- /bin/sh
args:
- -c
- {{ range $f := .Values.vcluster.command -}}
{{ $f }}
{{- end }}
{{- range $f := .Values.vcluster.baseArgs }}
{{ $f }}
{{- end }}
{{- if not .Values.sync.nodes.enableScheduler }}
--disable-scheduler
--kube-controller-manager-arg=controllers=*,-nodeipam,-nodelifecycle,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle,-ttl
--kube-apiserver-arg=endpoint-reconciler-type=none
{{- else }}
--kube-controller-manager-arg=controllers=*,-nodeipam,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle,-ttl
--kube-apiserver-arg=endpoint-reconciler-type=none
--kube-controller-manager-arg=node-monitor-grace-period=1h
--kube-controller-manager-arg=node-monitor-period=1h
{{- end }}
{{- if .Values.serviceCIDR }}
--service-cidr={{ .Values.serviceCIDR }}
{{- else }}
--service-cidr=$(SERVICE_CIDR)
{{- end }}
{{- if .Values.pro }}
{{- if .Values.embeddedEtcd.enabled }}
--datastore-endpoint=https://localhost:2379
--datastore-cafile=/data/pki/etcd/ca.crt
--datastore-certfile=/data/pki/apiserver-etcd-client.crt
--datastore-keyfile=/data/pki/apiserver-etcd-client.key
{{- end }}
{{- end }}
{{- range $f := .Values.vcluster.extraArgs }}
{{ $f }}
{{- end }}
&& true
env:
{{- if .Values.vcluster.env }}
{{ toYaml .Values.vcluster.env | indent 10 }}
{{- end }}
- name: K3S_TOKEN
valueFrom:
secretKeyRef:
name: "vc-k3s-{{ .Release.Name }}"
key: "token"
{{- if not .Values.serviceCIDR }}
- name: SERVICE_CIDR
valueFrom:
configMapKeyRef:
name: "vc-cidr-{{ .Release.Name }}"
key: cidr
{{- end }}
- "cp /bin/k3s /k3s-binary/k3s"
{{- if .Values.vcluster.imagePullPolicy }}
imagePullPolicy: {{ .Values.vcluster.imagePullPolicy }}
{{- end }}
securityContext:
{{ toYaml .Values.securityContext | indent 10 }}
volumeMounts:
- name: config
mountPath: /etc/rancher
{{ toYaml .Values.vcluster.volumeMounts | indent 10 }}
{{- if .Values.vcluster.extraVolumeMounts }}
{{ toYaml .Values.vcluster.extraVolumeMounts | indent 10 }}
{{- end }}
- name: k3s-binary
mountPath: /k3s-binary
resources:
{{ toYaml .Values.vcluster.resources | indent 10 }}
{{- end }}
containers:
{{- if not .Values.syncer.disabled }}
- name: syncer
{{- if .Values.syncer.image }}
@@ -352,6 +303,9 @@ spec:
securityContext:
{{ toYaml .Values.securityContext | indent 10 }}
env:
{{- if .Values.vcluster.env }}
{{ toYaml .Values.vcluster.env | indent 10 }}
{{- end }}
{{- if .Values.syncer.env }}
{{ toYaml .Values.syncer.env | indent 10 }}
{{- end }}
@@ -363,6 +317,37 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: VCLUSTER_COMMAND
value: |-
command:
{{ range $f := .Values.vcluster.command -}}
- {{ $f }}
{{- end }}
args:
{{- range $f := .Values.vcluster.baseArgs }}
- {{ $f }}
{{- end }}
{{- if not .Values.sync.nodes.enableScheduler }}
- --disable-scheduler
- --kube-controller-manager-arg=controllers=*,-nodeipam,-nodelifecycle,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle,-ttl
- --kube-apiserver-arg=endpoint-reconciler-type=none
{{- else }}
- --kube-controller-manager-arg=controllers=*,-nodeipam,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle,-ttl
- --kube-apiserver-arg=endpoint-reconciler-type=none
- --kube-controller-manager-arg=node-monitor-grace-period=1h
- --kube-controller-manager-arg=node-monitor-period=1h
{{- end }}
{{- if .Values.pro }}
{{- if .Values.embeddedEtcd.enabled }}
- --datastore-endpoint=https://localhost:2379
- --datastore-cafile=/data/pki/etcd/ca.crt
- --datastore-certfile=/data/pki/apiserver-etcd-client.crt
- --datastore-keyfile=/data/pki/apiserver-etcd-client.key
{{- end }}
{{- end }}
{{- range $f := .Values.vcluster.extraArgs }}
- {{ $f }}
{{- end }}
{{- if eq (.Values.replicas | toString | atoi) 1 }}
- name: VCLUSTER_NODE_NAME
valueFrom:
@@ -377,8 +362,12 @@ spec:
- name: VCLUSTER_TELEMETRY_CONFIG
value: {{ .Values.telemetry | toJson | quote }}
volumeMounts:
- name: k3s-binary
mountPath: /k3s-binary
- name: helm-cache
mountPath: /.cache/helm
- name: config
mountPath: /etc/rancher
- name: tmp
mountPath: /tmp
{{- if or .Values.coredns.enabled .Values.coredns.integrated }}
@@ -409,6 +398,12 @@ spec:
{{ toYaml .Values.syncer.volumeMounts | indent 10 }}
{{- if .Values.syncer.extraVolumeMounts }}
{{ toYaml .Values.syncer.extraVolumeMounts | indent 10 }}
{{- end }}
{{- if .Values.vcluster.volumeMounts }}
{{ toYaml .Values.vcluster.volumeMounts | indent 10 }}
{{- end }}
{{- if .Values.vcluster.extraVolumeMounts }}
{{ toYaml .Values.vcluster.extraVolumeMounts | indent 10 }}
{{- end }}
resources:
{{ toYaml .Values.syncer.resources | indent 10 }}
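The k3s arguments are no longer rendered into a dedicated container; the template above serializes them into the VCLUSTER_COMMAND environment variable, which the syncer parses at startup. A minimal sketch of that parsing step, using a shortened, hypothetical sample value as it might be rendered with default chart values (the struct mirrors k3sCommand from pkg/k3s/k3s.go further down):

package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

// Mirrors the k3sCommand struct introduced in pkg/k3s/k3s.go below.
type k3sCommand struct {
	Command []string `json:"command,omitempty"`
	Args    []string `json:"args,omitempty"`
}

func main() {
	// Hypothetical VCLUSTER_COMMAND value, shortened for readability;
	// the real value depends on the chart values used at install time.
	value := `command:
- /k3s-binary/k3s
args:
- server
- --disable-scheduler
- --kube-apiserver-arg=endpoint-reconciler-type=none`

	parsed := &k3sCommand{}
	if err := yaml.Unmarshal([]byte(value), parsed); err != nil {
		panic(err)
	}

	// The syncer later appends --service-cidr and --token before starting k3s.
	fmt.Println(append(parsed.Command, parsed.Args...))
}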
25 changes: 12 additions & 13 deletions charts/k3s/values.yaml
@@ -150,13 +150,10 @@ syncer:
extraVolumeMounts: []
resources:
limits:
cpu: 1000m
memory: 512Mi
memory: 2Gi
requests:
# ensure that cpu/memory requests are high enough.
# for example gke wants minimum 10m/32Mi here!
cpu: 20m
memory: 64Mi
cpu: 200m
memory: 256Mi
kubeConfigContextName: "my-vcluster"
serviceAnnotations: {}

@@ -166,7 +163,7 @@ vcluster:
image: rancher/k3s:v1.28.2-k3s1
imagePullPolicy: ""
command:
- /bin/k3s
- /k3s-binary/k3s
baseArgs:
- server
- --write-kubeconfig=/data/k3s-config/kube-config.yaml
@@ -178,17 +175,19 @@ vcluster:
- --flannel-backend=none
- --kube-apiserver-arg=bind-address=127.0.0.1
extraArgs: []
# Deprecated: Use syncer.extraVolumeMounts instead
extraVolumeMounts: []
volumeMounts:
- mountPath: /data
name: data
# Deprecated: Use syncer.volumeMounts instead
volumeMounts: []
# Deprecated: Use syncer.env instead
env: []
resources:
limits:
memory: 2Gi
cpu: 100m
memory: 128Mi
requests:
cpu: 200m
memory: 256Mi
cpu: 20m
memory: 64Mi

# Embedded etcd settings
embeddedEtcd:
1 change: 1 addition & 0 deletions devspace.yaml
@@ -26,6 +26,7 @@ deployments:
chart:
name: ./charts/k8s
values: &values
autoDeletePersistentVolumeClaims: false
k3sToken: "devspace"
job:
enabled: false
90 changes: 83 additions & 7 deletions pkg/k3s/k3s.go
@@ -4,24 +4,94 @@ import (
"context"
"fmt"
"os"
"os/exec"
"strings"

"github.com/ghodss/yaml"
"github.com/loft-sh/log/scanner"
"github.com/loft-sh/vcluster/pkg/util/random"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)

var tokenPath = "/data/server/token"

func EnsureK3SToken(ctx context.Context, currentNamespaceClient kubernetes.Interface, currentNamespace, vClusterName string) error {
const VClusterCommandEnv = "VCLUSTER_COMMAND"

type k3sCommand struct {
Command []string `json:"command,omitempty"`
Args []string `json:"args,omitempty"`
}

func StartK3S(ctx context.Context, serviceCIDR, k3sToken string) error {
reader, writer, err := os.Pipe()
if err != nil {
return err
}
defer writer.Close()

command := &k3sCommand{}
err = yaml.Unmarshal([]byte(os.Getenv(VClusterCommandEnv)), command)
if err != nil {
return fmt.Errorf("parsing k3s command %s: %w", os.Getenv(VClusterCommandEnv), err)
}

// add service cidr and k3s token
command.Args = append(
command.Args,
"--service-cidr", serviceCIDR,
"--token", k3sToken,
)
args := append(command.Command, command.Args...)

// start func
done := make(chan struct{})
go func() {
defer close(done)

// make sure we scan the output correctly
scan := scanner.NewScanner(reader)
for scan.Scan() {
line := scan.Text()
if len(line) == 0 {
continue
}

// print to our logs
args := []interface{}{"component", "k3s"}
PrintK3sLine(line, args)
}
}()

// start the command
klog.InfoS("Starting k3s", "args", strings.Join(args, " "))
cmd := exec.CommandContext(ctx, args[0], args[1:]...)
cmd.Stdout = writer
cmd.Stderr = writer
err = cmd.Run()

// make sure we wait for scanner to be done
_ = writer.Close()
<-done

// regular stop case
if err != nil && err.Error() != "signal: killed" {
return err
}
return nil
}

func EnsureK3SToken(ctx context.Context, currentNamespaceClient kubernetes.Interface, currentNamespace, vClusterName string) (string, error) {
// check if secret exists
secretName := fmt.Sprintf("vc-k3s-%s", vClusterName)
_, err := currentNamespaceClient.CoreV1().Secrets(currentNamespace).Get(ctx, secretName, metav1.GetOptions{})
secret, err := currentNamespaceClient.CoreV1().Secrets(currentNamespace).Get(ctx, secretName, metav1.GetOptions{})
if err != nil && !kerrors.IsNotFound(err) {
return err
return "", err
} else if err == nil {
return nil
return string(secret.Data["token"]), nil
}

// try to read token file (migration case)
@@ -31,7 +101,7 @@ func EnsureK3SToken(ctx context.Context, currentNamespaceClient kubernetes.Inter
}

// create k3s secret
_, err = currentNamespaceClient.CoreV1().Secrets(currentNamespace).Create(ctx, &corev1.Secret{
secret, err = currentNamespaceClient.CoreV1().Secrets(currentNamespace).Create(ctx, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: currentNamespace,
@@ -42,8 +112,14 @@ func EnsureK3SToken(ctx context.Context, currentNamespaceClient kubernetes.Inter
Type: corev1.SecretTypeOpaque,
}, metav1.CreateOptions{})
if err != nil && !kerrors.IsAlreadyExists(err) {
return err
return "", err
} else if err != nil {
// retrieve k3s secret again
secret, err = currentNamespaceClient.CoreV1().Secrets(currentNamespace).Get(ctx, secretName, metav1.GetOptions{})
if err != nil {
return "", err
}
}

return nil
return string(secret.Data["token"]), nil
}
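
Putting the pieces together, a hypothetical caller inside the syncer could wire the two new helpers as sketched below. This is illustrative only, not the actual vcluster start-up code; the namespace, vcluster name, and service CIDR are assumed to be known already.

package setup

import (
	"context"
	"fmt"

	"github.com/loft-sh/vcluster/pkg/k3s"
	"k8s.io/client-go/kubernetes"
)

// startK3SInSyncer sketches how the syncer might use the new helpers:
// read (or create) the shared token secret, then run the k3s binary that
// the init container copied into /k3s-binary from inside this container.
func startK3SInSyncer(ctx context.Context, client kubernetes.Interface, namespace, vClusterName, serviceCIDR string) error {
	token, err := k3s.EnsureK3SToken(ctx, client, namespace, vClusterName)
	if err != nil {
		return fmt.Errorf("ensure k3s token: %w", err)
	}

	// Blocks until k3s exits; command and args come from VCLUSTER_COMMAND.
	return k3s.StartK3S(ctx, serviceCIDR, token)
}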