[controller] Update sds-node-configuration api version (#184)
Signed-off-by: Viktor Kramarenko <[email protected]>
ViktorKram authored Sep 25, 2024
1 parent 52a02c9 · commit fea67af
Showing 10 changed files with 122 additions and 78 deletions.
api/v1alpha1/replicated_storage_pool.go (2 changes: 1 addition & 1 deletion)
@@ -27,7 +27,7 @@ type ReplicatedStoragePool struct {

type ReplicatedStoragePoolSpec struct {
Type string `json:"type"`
- LvmVolumeGroups []ReplicatedStoragePoolLVMVolumeGroups `json:"lvmVolumeGroups"`
+ LVMVolumeGroups []ReplicatedStoragePoolLVMVolumeGroups `json:"lvmVolumeGroups"`
}

type ReplicatedStoragePoolLVMVolumeGroups struct {
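Worth noting: the rename above changes only the Go identifier, not the `json:"lvmVolumeGroups"` struct tag, so the serialized form of existing ReplicatedStoragePool manifests is untouched. A minimal sketch of that (the import path and alias are assumptions based on this repository's `api` module):

```go
package main

import (
	"encoding/json"
	"fmt"

	srv "github.com/deckhouse/sds-replicated-volume/api/v1alpha1" // assumed import path
)

func main() {
	spec := srv.ReplicatedStoragePoolSpec{
		Type: "LVM",
		// Renamed Go field; the JSON key below stays "lvmVolumeGroups".
		LVMVolumeGroups: []srv.ReplicatedStoragePoolLVMVolumeGroups{
			{Name: "vg-1-on-worker-0"},
		},
	}
	b, err := json.Marshal(spec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"type":"LVM","lvmVolumeGroups":[{"name":"vg-1-on-worker-0",...}]}
}
```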
docs/README.md (42 changes: 30 additions & 12 deletions)
@@ -106,14 +106,20 @@ dev-ecf886f85638ee6af563e5f848d2878abae1dcfd worker-0 true 5Gi
```yaml
kubectl apply -f - <<EOF
apiVersion: storage.deckhouse.io/v1alpha1
- kind: LvmVolumeGroup
+ kind: LVMVolumeGroup
metadata:
name: "vg-1-on-worker-0" # The name can be any fully qualified resource name in Kubernetes. This LvmVolumeGroup resource name will be used to create ReplicatedStoragePool in the future
name: "vg-1-on-worker-0" # The name can be any fully qualified resource name in Kubernetes. This LVMVolumeGroup resource name will be used to create ReplicatedStoragePool in the future
spec:
type: Local
-   blockDeviceNames: # specify the names of the BlockDevice resources that are located on the target node and whose CONSUMABLE is set to true. Note that the node name is not specified anywhere since it is derived from BlockDevice resources.
-     - dev-0a29d20f9640f3098934bca7325f3080d9b6ef74
-     - dev-ecf886f85638ee6af563e5f848d2878abae1dcfd
+   local:
+     nodeName: "worker-0"
+   blockDeviceSelector:
+     matchExpressions:
+       - key: kubernetes.io/metadata.name
+         operator: In
+         values:
+           - dev-0a29d20f9640f3098934bca7325f3080d9b6ef74
+           - dev-ecf886f85638ee6af563e5f848d2878abae1dcfd
actualVGNameOnTheNode: "vg-1" # the name of the LVM VG to be created from the above block devices on the node
EOF
```
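Note the shape change in this manifest: the flat `blockDeviceNames` list is gone; the node is now named explicitly under `local.nodeName`, and block devices are picked by a label selector. A sketch of how a `matchExpressions` selector like this one is evaluated, using the standard apimachinery helpers (whether sds-node-configurator evaluates it exactly this way is an assumption; the key is taken from the manifest above):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The selector from the manifest above: an In expression over the
	// metadata-name key selects block devices by their resource names.
	ls := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "kubernetes.io/metadata.name",
			Operator: metav1.LabelSelectorOpIn,
			Values: []string{
				"dev-0a29d20f9640f3098934bca7325f3080d9b6ef74",
				"dev-ecf886f85638ee6af563e5f848d2878abae1dcfd",
			},
		}},
	}
	sel, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Matches(labels.Set{
		"kubernetes.io/metadata.name": "dev-0a29d20f9640f3098934bca7325f3080d9b6ef74",
	})) // true
}
```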
@@ -131,13 +137,19 @@ kubectl get lvg vg-1-on-worker-0 -w
```shell
kubectl apply -f - <<EOF
apiVersion: storage.deckhouse.io/v1alpha1
- kind: LvmVolumeGroup
+ kind: LVMVolumeGroup
metadata:
name: "vg-1-on-worker-1"
spec:
type: Local
-   blockDeviceNames:
-     - dev-49ff548dfacba65d951d2886c6ffc25d345bb548
+   local:
+     nodeName: "worker-1"
+   blockDeviceSelector:
+     matchExpressions:
+       - key: kubernetes.io/metadata.name
+         operator: In
+         values:
+           - dev-49ff548dfacba65d951d2886c6ffc25d345bb548
actualVGNameOnTheNode: "vg-1"
EOF
```
@@ -155,13 +167,19 @@ kubectl get lvg vg-1-on-worker-1 -w
```shell
kubectl apply -f - <<EOF
apiVersion: storage.deckhouse.io/v1alpha1
- kind: LvmVolumeGroup
+ kind: LVMVolumeGroup
metadata:
name: "vg-1-on-worker-2"
spec:
type: Local
-   blockDeviceNames:
-     - dev-75d455a9c59858cf2b571d196ffd9883f1349d2e
+   local:
+     nodeName: "worker-2"
+   blockDeviceSelector:
+     matchExpressions:
+       - key: kubernetes.io/metadata.name
+         operator: In
+         values:
+           - dev-75d455a9c59858cf2b571d196ffd9883f1349d2e
actualVGNameOnTheNode: "vg-1"
EOF
```
@@ -184,7 +202,7 @@ metadata:
name: data
spec:
type: LVM
-   lvmVolumeGroups: # Here, specify the names of the LvmVolumeGroup resources you created earlier
+   lvmVolumeGroups: # Here, specify the names of the LVMVolumeGroup resources you created earlier
- name: vg-1-on-worker-0
- name: vg-1-on-worker-1
- name: vg-1-on-worker-2
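As the controller code later in this commit shows, each entry of a `type: LVM` pool is backed in LINSTOR by the VG itself (here `vg-1`), while a `LVMThin` pool appends the thin-pool name. A condensed, hypothetical helper restating that mapping (the function name is ours, not from the commit; the type strings are assumed to match the spec's `type` field as the README uses them):

```go
package main

import "fmt"

// linstorBackingName restates the switch in ReconcileReplicatedStoragePool:
// type LVM uses the VG name as-is; LVMThin uses "<vg>/<thin pool>".
func linstorBackingName(poolType, vgNameOnNode, thinPoolName string) string {
	switch poolType {
	case "LVM":
		return vgNameOnNode
	case "LVMThin":
		return vgNameOnNode + "/" + thinPoolName
	}
	return ""
}

func main() {
	fmt.Println(linstorBackingName("LVM", "vg-1", ""))           // vg-1
	fmt.Println(linstorBackingName("LVMThin", "vg-1", "thin-1")) // vg-1/thin-1
}
```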
docs/README_RU.md (42 changes: 30 additions & 12 deletions)
@@ -106,14 +106,20 @@ dev-ecf886f85638ee6af563e5f848d2878abae1dcfd worker-0 true 5Gi
```yaml
kubectl apply -f - <<EOF
apiVersion: storage.deckhouse.io/v1alpha1
- kind: LvmVolumeGroup
+ kind: LVMVolumeGroup
metadata:
name: "vg-1-on-worker-0" # Имя может быть любым подходящим для имен ресурсов в Kubernetes. Именно это имя ресурса LvmVolumeGroup будет в дальнейшем использоваться для создания ReplicatedStoragePool
name: "vg-1-on-worker-0" # Имя может быть любым подходящим для имен ресурсов в Kubernetes. Именно это имя ресурса LVMVolumeGroup будет в дальнейшем использоваться для создания ReplicatedStoragePool
spec:
type: Local
-   blockDeviceNames: # указываем имена ресурсов BlockDevice, которые расположены на нужном нам узле и CONSUMABLE которых выставлен в true. Обратите внимание, что имя узла мы нигде не указываем. Имя узла берётся из ресурсов BlockDevice
-     - dev-0a29d20f9640f3098934bca7325f3080d9b6ef74
-     - dev-ecf886f85638ee6af563e5f848d2878abae1dcfd
+   local:
+     nodeName: "worker-0"
+   blockDeviceSelector:
+     matchExpressions:
+       - key: kubernetes.io/metadata.name
+         operator: In
+         values:
+           - dev-0a29d20f9640f3098934bca7325f3080d9b6ef74
+           - dev-ecf886f85638ee6af563e5f848d2878abae1dcfd
actualVGNameOnTheNode: "vg-1" # имя LVM VG, которая будет создана на узле из указанных выше блочных устройств
EOF
```
@@ -131,13 +137,19 @@ kubectl get lvg vg-1-on-worker-0 -w
```shell
kubectl apply -f - <<EOF
apiVersion: storage.deckhouse.io/v1alpha1
- kind: LvmVolumeGroup
+ kind: LVMVolumeGroup
metadata:
name: "vg-1-on-worker-1"
spec:
type: Local
-   blockDeviceNames:
-     - dev-49ff548dfacba65d951d2886c6ffc25d345bb548
+   local:
+     nodeName: "worker-1"
+   blockDeviceSelector:
+     matchExpressions:
+       - key: kubernetes.io/metadata.name
+         operator: In
+         values:
+           - dev-49ff548dfacba65d951d2886c6ffc25d345bb548
actualVGNameOnTheNode: "vg-1"
EOF
```
@@ -155,13 +167,19 @@ kubectl get lvg vg-1-on-worker-1 -w
```shell
kubectl apply -f - <<EOF
apiVersion: storage.deckhouse.io/v1alpha1
- kind: LvmVolumeGroup
+ kind: LVMVolumeGroup
metadata:
name: "vg-1-on-worker-2"
spec:
type: Local
-   blockDeviceNames:
-     - dev-75d455a9c59858cf2b571d196ffd9883f1349d2e
+   local:
+     nodeName: "worker-2"
+   blockDeviceSelector:
+     matchExpressions:
+       - key: kubernetes.io/metadata.name
+         operator: In
+         values:
+           - dev-75d455a9c59858cf2b571d196ffd9883f1349d2e
actualVGNameOnTheNode: "vg-1"
EOF
```
@@ -184,7 +202,7 @@ metadata:
name: data
spec:
type: LVM
-   lvmVolumeGroups: # Здесь указываем имена ресурсов LvmVolumeGroup, которые мы создавали ранее
+   lvmVolumeGroups: # Здесь указываем имена ресурсов LVMVolumeGroup, которые мы создавали ранее
- name: vg-1-on-worker-0
- name: vg-1-on-worker-1
- name: vg-1-on-worker-2
images/sds-replicated-volume-controller/src/go.mod (2 changes: 1 addition & 1 deletion)
@@ -4,7 +4,7 @@ go 1.22.3

require (
github.com/LINBIT/golinstor v0.49.0
- 	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b
+ 	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583
github.com/deckhouse/sds-replicated-volume/api v0.0.0-20240812165341-a73e664454b9
github.com/go-logr/logr v1.4.2
github.com/onsi/ginkgo/v2 v2.19.0
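The new requirement is a Go pseudo-version: base v0.0.0, the commit timestamp in UTC, and a 12-character commit-hash prefix. A side sketch of how such a string is derived (golang.org/x/mod is not a dependency of this module; it is pulled in here purely for illustration):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/mod/module"
)

func main() {
	// Timestamp and hash prefix taken from the require line above.
	commitTime := time.Date(2024, 9, 25, 9, 4, 58, 0, time.UTC)
	// No tagged release precedes the commit, so the base is v0.0.0.
	fmt.Println(module.PseudoVersion("v0", "", commitTime, "249de2896583"))
	// Output: v0.0.0-20240925090458-249de2896583
}
```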
images/sds-replicated-volume-controller/src/go.sum (4 changes: 4 additions & 0 deletions)
@@ -11,6 +11,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b h1:EYmHWTWcWMpyxJGZK05ZxlIFnh9s66DRrxLw/LNb/xw=
github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
+ github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77 h1:Y3vswUk/rnCpkZzWBk+Mlr9LtMg6EI5LkQ4GvgHCslI=
+ github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
+ github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583 h1:HQd5YFQqoHj/CQwBKFCyuVCQmNV0PdML8QJiyDka4fQ=
+ github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
github.com/deckhouse/sds-replicated-volume/api v0.0.0-20240812165341-a73e664454b9 h1:keKcnq6do7yxGZHeNERhhx3dH1/wQmj+x5vxcWH3CcI=
github.com/deckhouse/sds-replicated-volume/api v0.0.0-20240812165341-a73e664454b9/go.mod h1:6yz0RtbkLVJtK2DeuvgfaqBZRl5V5ax1WsfPF5pbnvo=
github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 h1:C7t6eeMaEQVy6e8CarIhscYQlNmw5e3G36y7l7Y21Ao=
@@ -135,15 +135,15 @@ func NewReplicatedStoragePool(
ephemeralNodesList = append(ephemeralNodesList, node.Name)
}

- 	listDevice := &snc.LvmVolumeGroupList{}
+ 	listDevice := &snc.LVMVolumeGroupList{}

err = cl.List(ctx, listDevice)
if err != nil {
log.Error(err, "Error while getting LVM Volume Groups list")
return
}

- 	for _, lvmVolumeGroup := range e.ObjectNew.Spec.LvmVolumeGroups {
+ 	for _, lvmVolumeGroup := range e.ObjectNew.Spec.LVMVolumeGroups {
for _, lvg := range listDevice.Items {
if lvg.Name != lvmVolumeGroup.Name {
continue
@@ -196,7 +196,7 @@ func ReconcileReplicatedStoragePoolEvent(ctx context.Context, cl client.Client,
}

func ReconcileReplicatedStoragePool(ctx context.Context, cl client.Client, lc *lapi.Client, log logger.Logger, replicatedSP *srv.ReplicatedStoragePool) error { // TODO: add shouldRequeue as returned value
- 	ok, msg, lvmVolumeGroups := GetAndValidateVolumeGroups(ctx, cl, replicatedSP.Spec.Type, replicatedSP.Spec.LvmVolumeGroups)
+ 	ok, msg, lvmVolumeGroups := GetAndValidateVolumeGroups(ctx, cl, replicatedSP.Spec.Type, replicatedSP.Spec.LVMVolumeGroups)
if !ok {
replicatedSP.Status.Phase = "Failed"
replicatedSP.Status.Reason = msg
@@ -215,13 +215,13 @@ func ReconcileReplicatedStoragePool(ctx context.Context, cl client.Client, lc *l

failedMsgBuilder.WriteString("Error occurred while creating Storage Pools: ")

- 	for _, replicatedSPLvmVolumeGroup := range replicatedSP.Spec.LvmVolumeGroups {
- 		lvmVolumeGroup, ok := lvmVolumeGroups[replicatedSPLvmVolumeGroup.Name]
+ 	for _, replicatedSPLVMVolumeGroup := range replicatedSP.Spec.LVMVolumeGroups {
+ 		lvmVolumeGroup, ok := lvmVolumeGroups[replicatedSPLVMVolumeGroup.Name]
nodeName := lvmVolumeGroup.Status.Nodes[0].Name

if !ok {
- 			log.Error(nil, fmt.Sprintf("Error getting LvmVolumeGroup %s from lvmVolumeGroups map: %+v", replicatedSPLvmVolumeGroup.Name, lvmVolumeGroups))
- 			failedMsgBuilder.WriteString(fmt.Sprintf("Error getting LvmVolumeGroup %s from lvmVolumeGroups map. See logs of %s for details; ", replicatedSPLvmVolumeGroup.Name, ReplicatedStoragePoolControllerName))
+ 			log.Error(nil, fmt.Sprintf("Error getting LVMVolumeGroup %s from LVMVolumeGroups map: %+v", replicatedSPLVMVolumeGroup.Name, lvmVolumeGroups))
+ 			failedMsgBuilder.WriteString(fmt.Sprintf("Error getting LVMVolumeGroup %s from LVMVolumeGroups map. See logs of %s for details; ", replicatedSPLVMVolumeGroup.Name, ReplicatedStoragePoolControllerName))
isSuccessful = false
continue
}
@@ -232,7 +232,7 @@ func ReconcileReplicatedStoragePool(ctx context.Context, cl client.Client, lc *l
lvmVgForLinstor = lvmVolumeGroup.Spec.ActualVGNameOnTheNode
case TypeLVMThin:
lvmType = lapi.LVM_THIN
- 			lvmVgForLinstor = lvmVolumeGroup.Spec.ActualVGNameOnTheNode + "/" + replicatedSPLvmVolumeGroup.ThinPoolName
+ 			lvmVgForLinstor = lvmVolumeGroup.Spec.ActualVGNameOnTheNode + "/" + replicatedSPLVMVolumeGroup.ThinPoolName
}

newStoragePool := lapi.StoragePool{
@@ -336,56 +336,56 @@ func GetReplicatedStoragePool(ctx context.Context, cl client.Client, namespace,
return obj, err
}

- func GetLvmVolumeGroup(ctx context.Context, cl client.Client, name string) (*snc.LvmVolumeGroup, error) {
- 	obj := &snc.LvmVolumeGroup{}
+ func GetLVMVolumeGroup(ctx context.Context, cl client.Client, name string) (*snc.LVMVolumeGroup, error) {
+ 	obj := &snc.LVMVolumeGroup{}
err := cl.Get(ctx, client.ObjectKey{
Name: name,
}, obj)
return obj, err
}

- func GetAndValidateVolumeGroups(ctx context.Context, cl client.Client, lvmType string, replicatedSPLVMVolumeGroups []srv.ReplicatedStoragePoolLVMVolumeGroups) (bool, string, map[string]snc.LvmVolumeGroup) {
+ func GetAndValidateVolumeGroups(ctx context.Context, cl client.Client, lvmType string, replicatedSPLVMVolumeGroups []srv.ReplicatedStoragePoolLVMVolumeGroups) (bool, string, map[string]snc.LVMVolumeGroup) {
var lvmVolumeGroupName string
var nodeName string
nodesWithlvmVolumeGroups := make(map[string]string)
- 	invalidLvmVolumeGroups := make(map[string]string)
+ 	invalidLVMVolumeGroups := make(map[string]string)
lvmVolumeGroupsNames := make(map[string]bool)
- 	lvmVolumeGroups := make(map[string]snc.LvmVolumeGroup)
+ 	lvmVolumeGroups := make(map[string]snc.LVMVolumeGroup)

for _, g := range replicatedSPLVMVolumeGroups {
lvmVolumeGroupName = g.Name

if lvmVolumeGroupsNames[lvmVolumeGroupName] {
- 			invalidLvmVolumeGroups[lvmVolumeGroupName] = "LvmVolumeGroup name is not unique"
+ 			invalidLVMVolumeGroups[lvmVolumeGroupName] = "LVMVolumeGroup name is not unique"
continue
}
lvmVolumeGroupsNames[lvmVolumeGroupName] = true

- 		lvmVolumeGroup, err := GetLvmVolumeGroup(ctx, cl, lvmVolumeGroupName)
+ 		lvmVolumeGroup, err := GetLVMVolumeGroup(ctx, cl, lvmVolumeGroupName)
if err != nil {
- 			UpdateMapValue(invalidLvmVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("Error getting LVMVolumeGroup: %s", err.Error()))
+ 			UpdateMapValue(invalidLVMVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("Error getting LVMVolumeGroup: %s", err.Error()))
continue
}

if lvmVolumeGroup.Spec.Type != LVMVGTypeLocal {
- 			UpdateMapValue(invalidLvmVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("LvmVolumeGroup type is not %s", LVMVGTypeLocal))
+ 			UpdateMapValue(invalidLVMVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("LVMVolumeGroup type is not %s", LVMVGTypeLocal))
continue
}

if len(lvmVolumeGroup.Status.Nodes) != 1 {
- 			UpdateMapValue(invalidLvmVolumeGroups, lvmVolumeGroupName, "LvmVolumeGroup has more than one node in status.nodes. LvmVolumeGroup for LINSTOR Storage Pool must to have only one node")
+ 			UpdateMapValue(invalidLVMVolumeGroups, lvmVolumeGroupName, "LVMVolumeGroup has more than one node in status.nodes. LVMVolumeGroup for LINSTOR Storage Pool must have only one node")
continue
}

nodeName = lvmVolumeGroup.Status.Nodes[0].Name
if value, ok := nodesWithlvmVolumeGroups[nodeName]; ok {
- 			UpdateMapValue(invalidLvmVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("This LvmVolumeGroup have same node %s as LvmVolumeGroup with name: %s. LINSTOR Storage Pool is allowed to have only one LvmVolumeGroup per node", nodeName, value))
+ 			UpdateMapValue(invalidLVMVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("This LVMVolumeGroup has the same node %s as LVMVolumeGroup with name: %s. LINSTOR Storage Pool is allowed to have only one LVMVolumeGroup per node", nodeName, value))
}

switch lvmType {
case TypeLVMThin:
if len(g.ThinPoolName) == 0 {
- 				UpdateMapValue(invalidLvmVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("type %s but ThinPoolName is not set", TypeLVMThin))
+ 				UpdateMapValue(invalidLVMVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("type %s but ThinPoolName is not set", TypeLVMThin))
break
}
found := false
@@ -396,20 +396,20 @@ func GetAndValidateVolumeGroups(ctx context.Context, cl client.Client, lvmType s
}
}
if !found {
- 				UpdateMapValue(invalidLvmVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("ThinPoolName %s is not found in Spec.ThinPools of LvmVolumeGroup %s", g.ThinPoolName, lvmVolumeGroupName))
+ 				UpdateMapValue(invalidLVMVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("ThinPoolName %s is not found in Spec.ThinPools of LVMVolumeGroup %s", g.ThinPoolName, lvmVolumeGroupName))
}
case TypeLVM:
if len(g.ThinPoolName) != 0 {
- 				UpdateMapValue(invalidLvmVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("type %s but ThinPoolName is set", TypeLVM))
+ 				UpdateMapValue(invalidLVMVolumeGroups, lvmVolumeGroupName, fmt.Sprintf("type %s but ThinPoolName is set", TypeLVM))
}
}

nodesWithlvmVolumeGroups[nodeName] = lvmVolumeGroupName
lvmVolumeGroups[lvmVolumeGroupName] = *lvmVolumeGroup
}

- 	if len(invalidLvmVolumeGroups) > 0 {
- 		msg := GetOrderedMapValuesAsString(invalidLvmVolumeGroups)
+ 	if len(invalidLVMVolumeGroups) > 0 {
+ 		msg := GetOrderedMapValuesAsString(invalidLVMVolumeGroups)
return false, msg, nil
}

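GetAndValidateVolumeGroups above rejects duplicate names, non-Local groups, groups spanning more than one node, two groups on the same node, and thin-pool mismatches. A hypothetical test of the duplicate-name guard (the package name, `srv` alias, and fake-client wiring are assumptions, not code from this commit):

```go
package controller // assumed package name

import (
	"context"
	"testing"

	srv "github.com/deckhouse/sds-replicated-volume/api/v1alpha1" // assumed alias, as in this file
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func TestDuplicateLVMVolumeGroupNamesAreRejected(t *testing.T) {
	// A bare fake client: the lookup for the first entry fails and is
	// recorded as invalid, and the second entry trips the duplicate guard,
	// so validation must fail either way.
	cl := fake.NewClientBuilder().Build()

	groups := []srv.ReplicatedStoragePoolLVMVolumeGroups{
		{Name: "vg-1-on-worker-0"},
		{Name: "vg-1-on-worker-0"}, // same name twice
	}

	ok, msg, _ := GetAndValidateVolumeGroups(context.Background(), cl, TypeLVM, groups)
	if ok {
		t.Fatal("expected validation to fail for a duplicate LVMVolumeGroup name")
	}
	if msg == "" {
		t.Fatal("expected a non-empty reason mentioning the non-unique name")
	}
}
```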