diff --git a/ocs_ci/ocs/utils.py b/ocs_ci/ocs/utils.py
index 1b323008f20..44ac3ed3514 100644
--- a/ocs_ci/ocs/utils.py
+++ b/ocs_ci/ocs/utils.py
@@ -13,6 +13,7 @@
 import yaml
 from gevent import sleep
+from pathlib import Path
 from libcloud.common.exceptions import BaseHTTPError
 from libcloud.common.types import LibcloudError
 from libcloud.compute.providers import get_driver
@@ -970,6 +971,8 @@ def run_must_gather(log_dir_path, image, command=None, cluster_config=None):
             timeout=must_gather_timeout,
             cluster_config=cluster_config,
         )
+        if config.DEPLOYMENT["external_mode"]:
+            collect_ceph_external(path=log_dir_path)
     except CommandFailed as ex:
         log.error(
             f"Failed during must gather logs! Error: {ex}"
@@ -986,6 +989,30 @@ def run_must_gather(log_dir_path, image, command=None, cluster_config=None):
     return mg_output
 
 
+def collect_ceph_external(path):
+    """
+    Collect the output of Ceph commands via the CLI tool on an external mode cluster
+
+    Args:
+        path (str): Destination directory for saving the Ceph command output files
+
+    """
+    try:
+        kubeconfig_path = os.path.join(
+            config.ENV_DATA["cluster_path"], config.RUN["kubeconfig_location"]
+        )
+        current_dir = Path(__file__).parent.parent.parent
+        script_path = os.path.join(current_dir, "scripts", "bash", "mg_external.sh")
+        run_cmd(
+            f"sh {script_path} {os.path.join(path, 'ceph_external')} {kubeconfig_path}",
+            timeout=100,
+        )
+    except Exception as ex:
+        log.info(
+            f"Failed to execute the ceph commands script due to the error {str(ex)}"
+        )
+
+
 def export_mg_pods_logs(log_dir_path):
     """
     Export must gather pods logs
diff --git a/scripts/bash/mg_external.sh b/scripts/bash/mg_external.sh
new file mode 100644
index 00000000000..b3ad6ac8f27
--- /dev/null
+++ b/scripts/bash/mg_external.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+set -x
+
+dbglog() {
+    # Allow the input to be piped
+    declare msg=${1:-$(>"${COMMAND_OUTPUT_DIR}/config_$i"; } >>"${COMMAND_ERR_OUTPUT_DIR}"/gather-config-"$i"-debug.log 2>&1 &
+    pids_ceph+=($!)
+done
+# Check if PID array has any values, if so, wait for them to finish
+if [ ${#pids_ceph[@]} -ne 0 ]; then
+    echo "Waiting on subprocesses to finish execution."
+    wait "${pids_ceph[@]}"
+fi
+
+
+# Collecting output of ceph commands
+for ((i = 0; i < ${#ceph_commands1[@]}; i++)); do
+    dbglog "collecting command output for: ${ceph_commands1[$i]}"
+    COMMAND_OUTPUT_FILE=${COMMAND_OUTPUT_DIR}/${ceph_commands1[$i]// /_}
+    JSON_COMMAND_OUTPUT_FILE=${COMMAND_JSON_OUTPUT_DIR}/${ceph_commands1[$i]// /_}_--format_json-pretty
+    { timeout 120 oc --kubeconfig="${KUBECONFIG}" -n "${ns}" exec "${TOOL_POD_NAME}" -- bash -c "${ceph_commands1[$i]} --connect-timeout=15" >>"${COMMAND_OUTPUT_FILE}"; } >>"${COMMAND_ERR_OUTPUT_DIR}"/gather-"${ceph_commands1[$i]}"-debug.log 2>&1 &
+    pids_ceph+=($!)
+    { timeout 120 oc --kubeconfig="${KUBECONFIG}" -n "${ns}" exec "${TOOL_POD_NAME}" -- bash -c "${ceph_commands1[$i]} --connect-timeout=15 --format json-pretty" >>"${JSON_COMMAND_OUTPUT_FILE}"; } >>"${COMMAND_ERR_OUTPUT_DIR}"/gather-"${ceph_commands1[$i]}"-json-debug.log 2>&1 &
+    pids_ceph+=($!)
+done
+# Check if PID array has any values, if so, wait for them to finish
+if [ ${#pids_ceph[@]} -ne 0 ]; then
+    echo "Waiting on subprocesses to finish execution."
+ wait "${pids[@]}" +fi + + +# Collecting output of ceph commands +for ((i = 0; i < ${#ceph_commands2[@]}; i++)); do + dbglog "collecting command output for: ${ceph_commands2[$i]}" + COMMAND_OUTPUT_FILE=${COMMAND_OUTPUT_DIR}/${ceph_commands2[$i]// /_} + JSON_COMMAND_OUTPUT_FILE=${COMMAND_JSON_OUTPUT_DIR}/${ceph_commands2[$i]// /_}_--format_json-pretty + { timeout 120 oc --kubeconfig="${KUBECONFIG}" -n "${ns}" exec "${TOOL_POD_NAME}" -- bash -c "${ceph_commands2[$i]} --connect-timeout=15" >>"${COMMAND_OUTPUT_FILE}"; } >>"${COMMAND_ERR_OUTPUT_DIR}"/gather-"${ceph_commands2[$i]}"-debug.log 2>&1 & + pids_ceph+=($!) + { timeout 120 oc --kubeconfig="${KUBECONFIG}" -n "${ns}" exec "${TOOL_POD_NAME}" -- bash -c "${ceph_commands2[$i]} --connect-timeout=15 --format json-pretty" >>"${JSON_COMMAND_OUTPUT_FILE}"; } >>"${COMMAND_ERR_OUTPUT_DIR}"/gather-"${ceph_commands2[$i]}"-json-debug.log 2>&1 & + pids_ceph+=($!) +done + +# Check if PID array has any values, if so, wait for them to finish +if [ ${#pids[@]} -ne 0 ]; then + echo "Waiting on subprocesses to finish execution." + wait "${pids[@]}" +fi diff --git a/tests/manage/z_cluster/test_bz.py b/tests/manage/z_cluster/test_bz.py new file mode 100644 index 00000000000..0b02f15d8f0 --- /dev/null +++ b/tests/manage/z_cluster/test_bz.py @@ -0,0 +1,26 @@ +import logging + +from ocs_ci.framework.testlib import ManageTest + + +logger = logging.getLogger(__name__) + + +class TestAcceptance(ManageTest): + """ + Acceptance test Managed Service + + """ + + def test_acceptance(self): + assert 1 == 2 + # from ocs_ci.ocs.utils import _collect_ocs_logs + # from ocs_ci.framework import config as ocsci_config + # _collect_ocs_logs( + # ocsci_config, + # dir_name="/home/oviner/test/test2", + # ocp=False, + # ocs=True, + # mcg=False, + # ) + # a=1