Fixed Teardown
Removed some unused code, added missing docstrings

Signed-off-by: Aviadp <[email protected]>
AviadP committed Jul 22, 2024
1 parent b58772d commit 220bfaa
Showing 3 changed files with 71 additions and 74 deletions.
2 changes: 2 additions & 0 deletions ocs_ci/ocs/constants.py
@@ -220,6 +220,8 @@
PROVISIONING = "Provisioning"
AGENT_SERVICE_CONFIG = "AgentServiceConfig"
INFRA_ENV = "InfraEnv"
DEFALUT_DEVICE_CLASS = "ssd"


# Provisioners
AWS_EFS_PROVISIONER = "openshift.org/aws-efs"
75 changes: 37 additions & 38 deletions ocs_ci/ocs/replica_one.py
@@ -1,5 +1,4 @@
from logging import getLogger
from typing import List # To be removed when python 3.8 support is dropped

from ocs_ci.framework import config
from ocs_ci.ocs.resources.pod import (
@@ -16,16 +15,36 @@
DEPLOYMENT,
STORAGECLUSTER,
STATUS_READY,
REPLICA1_STORAGECLASS,
)
from ocs_ci.ocs.exceptions import CommandFailed


log = getLogger(__name__)

REPLICA1_STORAGECLASS = "ocs-storagecluster-ceph-non-resilient-rbd"
_FAILURE_DOMAINS = None


def get_failures_domain_name() -> List[str]:
def get_failure_domains() -> list[str]:
"""
Gets Cluster Failure Domains
Returns:
list: Failure Domains names
"""
global _FAILURE_DOMAINS
if _FAILURE_DOMAINS is None:
try:
_FAILURE_DOMAINS = config.ENV_DATA.get(
"worker_availability_zones", get_failures_domain_name()
)
except CommandFailed as e:
print(f"Error initializing FAILURE_DOMAINS: {e}")
_FAILURE_DOMAINS = []
return _FAILURE_DOMAINS


def get_failures_domain_name() -> list[str]:
"""
Fetch Failure domains from cephblockpools names
@@ -37,10 +56,12 @@ def get_failures_domain_name() -> List[str]:
failure_domains = list()
cephblockpools_names = list()
prefix = DEFAULT_CEPHBLOCKPOOL
for i in range(0, len((cbp_object.data["items"]))):
cephblockpools_names.append(cbp_object.data["items"][i]["metadata"]["name"])

log.info(f"Cephblockpool names:{cephblockpools_names}")
items = cbp_object.data.get("items", [])
for i in range(len(items)):
name = items[i].get("metadata", {}).get("name")
if name:
cephblockpools_names.append(name)
log.info(f"Cephblockpool names:{cephblockpools_names}")

for name in cephblockpools_names:
if name.startswith(prefix):
@@ -54,11 +75,6 @@ def get_failures_domain_name() -> list[str]:
return failure_domains


FAILURE_DOMAINS = config.ENV_DATA.get(
"worker_availability_zones", get_failures_domain_name()
)


def get_replica_1_osds() -> dict:
"""
Gets the names and IDs of OSD associated with replica1
@@ -69,7 +85,7 @@
"""
replica1_osds = dict()
all_osds = get_pods_having_label(label=OSD_APP_LABEL)
for domain in FAILURE_DOMAINS:
for domain in get_failure_domains():
for osd in all_osds:
if osd["metadata"]["labels"]["ceph.rook.io/DeviceSet"] == domain:
replica1_osds[osd["metadata"]["name"]] = osd["metadata"]["labels"][
@@ -79,7 +95,7 @@
return replica1_osds


def get_replica1_osd_deployment() -> List[str]:
def get_replica1_osd_deployment() -> list[str]:
"""
Gets the names of OSD deployments associated with replica1
@@ -103,15 +119,15 @@ def get_replica1_osd_deployment() -> List[str]:
for deployment in osd_deployment:
if (
deployment["metadata"]["labels"]["ceph.rook.io/DeviceSet"]
in FAILURE_DOMAINS
in get_failure_domains()
):
log.info(deployment["metadata"]["name"])
replica1_osd_deployment.append(deployment["metadata"]["name"])

return replica1_osd_deployment


def scaledown_deployment(deployments_name: List[str]) -> None:
def scaledown_deployment(deployments_name: list[str]) -> None:
"""
Scale down deployments to 0
@@ -173,31 +189,14 @@ def delete_replica1_cephblockpools_cr(cbp_object: OCP):
Deletes CR of cephblockpools associated with replica1
Args:
cbp_object (OCP): OCP object with kind=CEPHBLOCKPOOL
"""
for i in range(0, len((cbp_object.data["items"]))):
cbp_cr_name = cbp_object.data["items"][i]["metadata"]["name"]
if cbp_cr_name != DEFAULT_CEPHBLOCKPOOL:
cbp_object.delete(resource_name=cbp_cr_name)


def delete_replica1_cephblockpools(cbp_object: OCP):
"""
Deletes cephblockpools associated with replica1
Args:
cbp_object (OCP): OCP object with kind=CEPHBLOCKPOOL
cbp_object (ocp.OCP): OCP object with kind=CEPHBLOCKPOOL
"""
toolbox_pod = get_ceph_tools_pod()
for i in range(0, len((cbp_object.data["items"]))):
replica1_pool_name = cbp_object.data["items"][i]["metadata"]["name"]
if replica1_pool_name != DEFAULT_CEPHBLOCKPOOL:
command = f"ceph osd pool rm {replica1_pool_name} {replica1_pool_name} --yes-i-really-really-mean-it"
toolbox_pod.exec_cmd_on_pod(command)

log.info(f"deleting {replica1_pool_name}")
cbp_cr_name = cbp_object.data["items"][i]["spec"]["deviceClass"]
log.info(f"cbp_cr_name: {cbp_cr_name}")
if cbp_cr_name in get_failure_domains():
cbp_object.delete(resource_name=(f"{DEFAULT_CEPHBLOCKPOOL}-{cbp_cr_name}"))


def modify_replica1_osd_count(new_osd_count):
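
Note: the heart of this teardown fix is that FAILURE_DOMAINS is no longer computed at import time in replica_one.py; callers now go through the new cached get_failure_domains() helper shown above. A minimal usage sketch, assuming only what the diff shows (the pick_target_domain helper and its error message are illustrative and not part of the repository):

from ocs_ci.ocs.replica_one import get_failure_domains


def pick_target_domain() -> str:
    """Hypothetical helper: pick the first discovered failure domain."""
    # get_failure_domains() resolves the zones on first use and caches them in a
    # module-level variable, so importing replica_one no longer triggers a cluster
    # lookup the way the removed module-level FAILURE_DOMAINS assignment did.
    domains = get_failure_domains()  # cached after the first call
    if not domains:
        raise RuntimeError("no failure domains discovered")
    return domains[0]

Because discovery is deferred, a failed lookup now degrades to an empty list (see the except CommandFailed branch) instead of failing at import time during teardown.
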
68 changes: 32 additions & 36 deletions tests/functional/storageclass/test_replica1.py
@@ -23,56 +23,45 @@
REPLICA1_STORAGECLASS,
VOLUME_MODE_BLOCK,
CSI_RBD_RAW_BLOCK_POD_YAML,
DEFALUT_DEVICE_CLASS,
)
from ocs_ci.helpers.helpers import create_pvc
from ocs_ci.ocs.replica_one import (
delete_replica1_cephblockpools,
delete_replica_1_sc,
get_osd_pgs_used,
purge_replica1_osd,
delete_replica1_cephblockpools_cr,
get_replica1_osd_deployment,
count_osd_pods,
modify_replica1_osd_count,
get_osd_kb_used_data,
get_device_class_from_ceph,
get_all_osd_names_by_device_class,
FAILURE_DOMAINS,
get_failure_domains,
)


log = getLogger(__name__)

DEFALUT_DEVICE_CLASS = "ssd"

def create_pod_on_failure_domain(project_factory, pod_factory, failure_domain: str):
"""
Creates a pod on the specified failure domain.
Args:
failure_domain (str): Failure domain to create the pod on.
def create_replica1_pvc(project_factory) -> None:
Returns:
Pod: Pod object
"""
proj_obj = project_factory()
proj = proj_obj.namespace
create_pvc(
pvc = create_pvc(
namespace=proj,
sc_name=REPLICA1_STORAGECLASS,
size="80G",
access_mode=ACCESS_MODE_RWO,
)


def create_pvc_for_project(project, size="80G", access_mode=ACCESS_MODE_RWO):
return create_pvc(
namespace=project,
sc_name=REPLICA1_STORAGECLASS,
size=size,
access_mode=access_mode,
)


def create_pod_on_failure_domain(
project_factory, pod_factory, failure_domain: str
) -> None:
proj_obj = project_factory()
proj = proj_obj.namespace
pvc = create_pvc_for_project(proj)

node = {"topology.kubernetes.io/zone": failure_domain}
return pod_factory(pvc=pvc, node_selector=node)

@@ -98,7 +87,20 @@ def validate_dict_values(input_dict: dict) -> bool:

def compare_dictionaries(
dict1: dict, dict2: dict, known_different_keys: list, tolerance: int = 10
):
) -> dict:
"""
Compares two dictionaries and returns a dictionary with the keys whose values differ,
allowing a percentage tolerance between the values.
Args:
dict1 (dict): first dictionary to compare.
dict2 (dict): second dictionary to compare.
known_different_keys (list): keys to exclude from the comparison.
tolerance (int): tolerance level in percent. Defaults to 10.
Returns:
dict: differences between the two dictionaries.
"""
differences = dict()

for key in dict1.keys():
@@ -126,7 +128,7 @@ def compare_dictionaries(

@pytest.fixture(scope="function", autouse=False)
def setup_replica1(
request: pytest.FixtureRequest,
request,
pod_factory,
project_factory,
):
@@ -143,33 +145,27 @@ def setup_replica1(
)

yield
# This part is not working at the moment
log.info("Teardown fixture called")
cephblockpools = OCP(kind=CEPHBLOCKPOOL)
set_non_resilient_pool(storage_cluster, enable=False)
storage_cluster.wait_for_resource(
condition=STATUS_READY, column="PHASE", timeout=180, sleep=15
)
delete_replica_1_sc()
log.info("StorageClass Deleted")
delete_replica1_cephblockpools_cr(cephblockpools)
delete_replica1_cephblockpools(cephblockpools)
deployments_name = get_replica1_osd_deployment()
log.info(deployments_name)
purge_replica1_osd()
# End
storage_cluster.wait_for_resource(
condition=STATUS_READY, column="PHASE", timeout=1800, sleep=60
)


@polarion_id("OCS-5720")
@brown_squad
@bugzilla("2274175")
@tier1
class TestReplicaOne:
osd_before_test = count_osd_pods()

def test_cluster_before_configuration(
self, pod_factory, pvc_factory, project_factory
):
self.osd_before_test = count_osd_pods()
self.kb_before_workload = get_osd_kb_used_data()
log.info(f"{self.kb_before_workload} KB used before test")
self.device_class_before_test = get_device_class_from_ceph()
@@ -200,7 +196,7 @@ def test_cluster_before_configuration(
def test_configure_replica1(self, project_factory, pod_factory, setup_replica1):
log.info("Starting Tier1 replica one test")

failure_domains = FAILURE_DOMAINS
failure_domains = get_failure_domains()
testing_pod = create_pod_on_failure_domain(
project_factory,
pod_factory,
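
For reference, below is a standalone sketch of the percentage-tolerance comparison described by the compare_dictionaries docstring above. It is built from the docstring only; the name compare_within_tolerance and the exact formula are assumptions, not the repository implementation (which is only partially shown in this diff):

def compare_within_tolerance(
    dict1: dict, dict2: dict, known_different_keys: list, tolerance: int = 10
) -> dict:
    """Return the keys whose values differ by more than tolerance percent."""
    differences = {}
    for key, value1 in dict1.items():
        if key in known_different_keys or key not in dict2:
            continue
        value2 = dict2[key]
        baseline = max(abs(float(value1)), 1.0)  # guard against division by zero
        relative_diff = abs(float(value1) - float(value2)) / baseline * 100
        if relative_diff > tolerance:
            differences[key] = (value1, value2)
    return differences

In a test like test_configure_replica1, such a comparison would typically be applied to the OSD KB-used snapshots taken before and after the workload, with known_different_keys listing entries expected to change.
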
