From 7caff917f30d05fb9597cb1f53a55b457562bef2 Mon Sep 17 00:00:00 2001
From: smit-gardhariya
Date: Tue, 28 Nov 2023 17:13:19 +0530
Subject: [PATCH] Create CPU/Memory/FileIO perf test using sysbench

Add test cases for CPU, memory, and file I/O performance using the
sysbench tool in LISA.

Signed-off-by: Smit Gardhariya
---
 microsoft/testsuites/sysbench/sysbenchperf.py | 406 ++++++++++++++++++
 1 file changed, 406 insertions(+)
 create mode 100644 microsoft/testsuites/sysbench/sysbenchperf.py

diff --git a/microsoft/testsuites/sysbench/sysbenchperf.py b/microsoft/testsuites/sysbench/sysbenchperf.py
new file mode 100644
index 0000000000..d042ffe431
--- /dev/null
+++ b/microsoft/testsuites/sysbench/sysbenchperf.py
@@ -0,0 +1,406 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+import json
+import re
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Dict, List
+
+from assertpy.assertpy import fail
+
+from lisa import (
+    Environment,
+    Logger,
+    Node,
+    TestCaseMetadata,
+    TestSuite,
+    TestSuiteMetadata,
+    notifier,
+)
+from lisa.messages import SubTestMessage, TestStatus, send_sub_test_result_message
+from lisa.testsuite import TestResult
+from lisa.tools import Sysbench
+from lisa.util.process import ExecutableResult
+
+
+@dataclass
+class SysbenchTestResult:
+    name: str = ""
+    status: TestStatus = TestStatus.QUEUED
+    message: str = ""
+
+
+@TestSuiteMetadata(
+    area="sysbench",
+    category="performance",
+    description="""
+    This test suite runs sysbench performance tests for CPU, memory, and file I/O.
+    """,
+)
+class SysbenchPerfTestSuite(TestSuite):
+    @TestCaseMetadata(
+        description="""
+        Runs the sysbench CPU performance test.
+        """,
+        priority=3,
+    )
+    def verify_sysbench_cpu_perf(
+        self,
+        log: Logger,
+        node: Node,
+        environment: Environment,
+        log_path: Path,
+        result: TestResult,
+        variables: Dict[str, Any],
+    ) -> None:
+        subtest_name: str = "default_cpu_perf_check"
+        res: SysbenchTestResult = SysbenchTestResult()
+
+        output: ExecutableResult = node.tools[Sysbench].run_cpu_perf()
+
+        if output.exit_code != 0:
+            res.name = subtest_name
+            res.message = f"{subtest_name} failed with exit code {output.exit_code}"
+            res.status = TestStatus.FAILED
+        else:
+            res = process_cpu_perf_result(output.stdout, subtest_name)
+
+        self._send_subtest_msg(
+            result,
+            res.name,
+            res.status,
+            res.message,
+        )
+
+        if res.status == TestStatus.FAILED:
+            fail(
+                f"Sysbench-cpu test failed with exit code: "
+                f"{output.exit_code}, cmd {output.cmd}"
+            )
+
+    @TestCaseMetadata(
+        description="""
+        Runs the sysbench file I/O performance test for each supported test mode.
+        """,
+        priority=3,
+    )
+    def verify_sysbench_fileio_perf(
+        self,
+        log: Logger,
+        node: Node,
+        environment: Environment,
+        log_path: Path,
+        result: TestResult,
+        variables: Dict[str, Any],
+    ) -> None:
+        io_mode_ops = {
+            "seqwr": "write",
+            "seqrd": "read",
+            "rndrd": "read",
+            "rndwr": "write",
+            "seqrewr": "all",
+            "rndrw": "all",
+        }
+        sysbench = node.tools[Sysbench]
+        failure = []
+        for mode in io_mode_ops.keys():
+            res: SysbenchTestResult = SysbenchTestResult()
+            subtest_name: str = f"{mode}_default_fileio_perf_check"
+            output: ExecutableResult = sysbench.run_fileio_perf(
+                test_mode=mode,
+                total_file=1,
+            )
+            if output.exit_code != 0:
+                failure.append(f"{output.cmd}")
+                res.name = subtest_name
+                res.message = (
+                    f"{subtest_name} failed with exit code {output.exit_code}"
+                )
+                res.status = TestStatus.FAILED
+            else:
+                res = process_fileio_perf_result(
+                    output.stdout,
+                    subtest_name,
+                    io_mode_ops[mode],
+                )
+
+            log.debug(res)
+
+            self._send_subtest_msg(
+                result,
+                res.name,
+                res.status,
+                res.message,
+            )
+
+        if len(failure) > 0:
+            log.debug(f"Failed count: {len(failure)}")
+            fail(f"Sysbench-fileio test failed: {failure}")
+
+    @TestCaseMetadata(
+        description="""
+        Runs the sysbench memory performance test for sequential and random
+        read/write access.
+        """,
+        priority=3,
+    )
+    def verify_sysbench_memory_perf(
+        self,
+        log: Logger,
+        node: Node,
+        environment: Environment,
+        log_path: Path,
+        result: TestResult,
+        variables: Dict[str, Any],
+    ) -> None:
+        memory_operation: List[str] = ["read", "write"]
+        memory_access_mode: List[str] = ["seq", "rnd"]
+        failure = []
+        sysbench = node.tools[Sysbench]
+
+        for op in memory_operation:
+            for access_mode in memory_access_mode:
+                subtest_name: str = f"{access_mode}_{op}_default_memory_perf_check"
+                res: SysbenchTestResult = SysbenchTestResult()
+                output: ExecutableResult = sysbench.run_memory_perf(
+                    memory_access_mode=access_mode,
+                    memory_oper=op,
+                )
+
+                if output.exit_code != 0:
+                    failure.append(f"{output.cmd}")
+                    res.name = subtest_name
+                    res.message = (
+                        f"{subtest_name} failed with exit code {output.exit_code}"
+                    )
+                    res.status = TestStatus.FAILED
+                else:
+                    res = process_memory_perf_result(
+                        output.stdout,
+                        subtest_name,
+                    )
+
+                self._send_subtest_msg(
+                    result,
+                    res.name,
+                    res.status,
+                    res.message,
+                )
+
+        if len(failure) > 0:
+            fail(f"Sysbench-memory test failed: {failure}")
+
+    def _send_subtest_msg(
+        self,
+        test_result: TestResult,
+        test_name: str,
+        test_status: TestStatus,
+        test_message: str = "",
+    ) -> None:
+        subtest_msg = send_sub_test_result_message(
+            test_result=test_result,
+            test_case_name=test_name,
+            test_status=test_status,
+            test_message=test_message,
+        )
+
+        notifier.notify(subtest_msg)
+
+
+def process_perf_result(data: str, testcase_name: str) -> SysbenchTestResult:
+    non_debug_pattern = r"^(?!.*DEBUG: ).*$"
+    debug_pattern = r".*DEBUG: .*"
+
+    result: Dict[str, Any] = {}
+
+    # Find all non-DEBUG lines in the text
+    non_debug_text = "\n".join(
+        re.findall(
+            non_debug_pattern,
+            data,
+            re.MULTILINE,
+        )
+    )
+    # Collect the DEBUG lines separately; they hold per-thread statistics
+    debug_text = "\n".join(
+        re.findall(
+            debug_pattern,
+            data,
+            re.MULTILINE,
+        )
+    )
+
+    # Extract total time using regular expression
+    total_time = re.search(
+        r"total time:\s+([\d.]+s)",
+        non_debug_text,
+    )
+    if total_time:
+        result["total_time"] = total_time.group(1)
+
+    # Extract total number of events using regular expression
+    total_events = re.search(
+        r"total number of events:\s+(\d+)",
+        non_debug_text,
+    )
+    if total_events:
+        result["total_events"] = total_events.group(1)
+
+    # Extract latency information using regular expressions
+    latency_param = "latency_ms"
+    result[latency_param] = {}
+    latency_info = re.findall(
+        r"(min|avg|max|95th percentile|sum):\s+([\d.]+)",
+        non_debug_text,
+    )
+    for latency in latency_info:
+        result[latency_param][latency[0]] = latency[1]
+
+    # Extract thread fairness statistics
+    thread_param = "thread_fairness"
+    result[thread_param] = {}
+    thread_events = re.search(
+        r"events \(avg/stddev\):\s+([\d.]+)/([\d.]+)",
+        non_debug_text,
+    )
+    if thread_events:
+        result[thread_param]["events_avg"] = thread_events.group(1)
+        result[thread_param]["events_stddev"] = thread_events.group(2)
+    thread_exec_time = re.search(
+        r"execution time \(avg/stddev\):\s+([\d.]+)/([\d.]+)",
+        non_debug_text,
+    )
+    if thread_exec_time:
+        result[thread_param]["execution_time_avg"] = thread_exec_time.group(1)
+        result[thread_param]["execution_time_stddev"] = thread_exec_time.group(2)
+
+    # Get Verbose per-thread statistics
+    verbose_per_thread_statistics = re.findall(
+        r"(min|avg|max|events):\s+([\d.]+)",
+        debug_text,
+    )
+    verbose_param = "verbose_per_thread_statistics"
+    result[verbose_param] = {}
+
+    for stat in verbose_per_thread_statistics:
+        result[verbose_param][stat[0]] = stat[1]
+
+    event_execution_time = re.search(
+        r"total time taken by event execution:\s+([\d.]+s)",
+        debug_text,
+    )
+    if event_execution_time:
+        result[verbose_param]["event_execution_time"] = event_execution_time.group(1)
+
+    return SysbenchTestResult(
+        name=testcase_name,
+        status=TestStatus.PASSED,
+        message=json.dumps(result),
+    )
+
+
+def process_memory_perf_result(
+    data: str,
+    testcase_name: str,
+) -> SysbenchTestResult:
+    result: SysbenchTestResult = process_perf_result(data, testcase_name)
+    message: Dict[str, Any] = json.loads(result.message)
+
+    total_operations = None
+    operations_per_second = None
+    total_mib_transferred = None
+    mib_per_second = None
+
+    # Extract Total operations and operations per second
+    total_operations_match = re.search(
+        r"Total operations: (\d+) \(([\d.]+) per second\)",
+        data,
+    )
+    if total_operations_match:
+        total_operations = int(total_operations_match.group(1))
+        operations_per_second = float(total_operations_match.group(2))
+
+    # Extract Total MiB transferred and MiB per second
+    total_mib_transferred_match = re.search(
+        r"([\d.]+) MiB transferred \(([\d.]+) MiB/sec\)",
+        data,
+    )
+    if total_mib_transferred_match:
+        total_mib_transferred = float(total_mib_transferred_match.group(1))
+        mib_per_second = float(total_mib_transferred_match.group(2))
+
+    message["total_operations"] = total_operations
+    message["operations_per_second"] = operations_per_second
+    message["total_mib_transferred"] = total_mib_transferred
+    message["mib_per_second"] = mib_per_second
+
+    result.message = json.dumps(message)
+    return result
+
+
+def process_cpu_perf_result(
+    data: str,
+    testcase_name: str,
+) -> SysbenchTestResult:
+    result: SysbenchTestResult = process_perf_result(data, testcase_name)
+    message: Dict[str, Any] = json.loads(result.message)
+
+    # Extract CPU speed using regular expression
+    cpu_speed = re.search(
+        r"events per second:\s+([\d.]+)",
+        data,
+    )
+    if cpu_speed:
+        message["cpu_speed"] = cpu_speed.group(1)
+
+    result.message = json.dumps(message)
+    return result
+
+
+def process_fileio_perf_result(
+    data: str,
+    testcase_name: str,
+    ops: str,
+) -> SysbenchTestResult:
+    result: SysbenchTestResult = process_perf_result(data, testcase_name)
+    message: Dict[str, Any] = json.loads(result.message)
+
+    reg_ex_io_per_sec = None
+    if ops == "write" or ops == "all":
+        reg_ex_io_per_sec = r"writes/s:\s+([\d.]+)"
+        io_per_sec = re.search(
+            reg_ex_io_per_sec,
+            data,
+        )
+        message["write_io_per_sec"] = io_per_sec.group(1) if io_per_sec else 0
+    if ops == "read" or ops == "all":
+        reg_ex_io_per_sec = r"reads/s:\s+([\d.]+)"
+        io_per_sec = re.search(
+            reg_ex_io_per_sec,
+            data,
+        )
+        message["read_io_per_sec"] = io_per_sec.group(1) if io_per_sec else 0
+
+    fsyncs_per_sec = re.search(
+        r"fsyncs/s:\s+([\d.]+)",
+        data,
+    )
+    message["fsyncs_per_sec"] = fsyncs_per_sec.group(1) if fsyncs_per_sec else 0
+
+    reg_ex_mib_per_sec = None
+    if ops == "write" or ops == "all":
+        reg_ex_mib_per_sec = r"written, MiB/s:\s+([\d.]+)"
+        mib_per_sec = re.search(
+            reg_ex_mib_per_sec,
+            data,
+        )
+        message["write_mib_per_sec"] = mib_per_sec.group(1) if mib_per_sec else 0
+    if ops == "read" or ops == "all":
+        reg_ex_mib_per_sec = r"read, MiB/s:\s+([\d.]+)"
+        mib_per_sec = re.search(
+            reg_ex_mib_per_sec,
+            data,
+        )
+        message["read_mib_per_sec"] = mib_per_sec.group(1) if mib_per_sec else 0
+
+    result.message = json.dumps(message)
+    return result
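
Note: the Sysbench tool wrapper used via node.tools[Sysbench] (run_cpu_perf, run_memory_perf, run_fileio_perf) is introduced elsewhere in this change and is not part of this file. For orientation only, the test_mode, total_file, memory_oper, and memory_access_mode arguments above correspond to sysbench's own command-line options; the Python sketch below shows the commands the wrapper is presumed to issue, not its actual implementation.

# Presumed sysbench invocations behind the tool wrapper (illustrative only).
cpu_cmd = "sysbench cpu run"
memory_cmd = "sysbench memory --memory-oper=read --memory-access-mode=seq run"
# total_file=1 is assumed to map to sysbench's --file-num option; fileio
# needs a prepare/run/cleanup cycle for each --file-test-mode value.
fileio_cmds = [
    "sysbench fileio --file-test-mode=seqwr --file-num=1 prepare",
    "sysbench fileio --file-test-mode=seqwr --file-num=1 run",
    "sysbench fileio --file-test-mode=seqwr --file-num=1 cleanup",
]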
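
The helper functions at the bottom of the file pull metrics out of sysbench's console output with regular expressions. The standalone snippet below (sample values invented, DEBUG-line filtering omitted) mirrors the patterns used by process_perf_result() and process_cpu_perf_result() to show what they extract:

# Minimal sketch of the regex-based parsing done by the helpers above.
import json
import re

sample = """\
CPU speed:
    events per second:  1234.56
General statistics:
    total time:                          10.0021s
    total number of events:              12345
Latency (ms):
         min:                                    0.75
         avg:                                    0.81
         max:                                    3.12
         95th percentile:                        0.95
         sum:                                 9998.12
"""

result = {}
total_time = re.search(r"total time:\s+([\d.]+s)", sample)
if total_time:
    result["total_time"] = total_time.group(1)
total_events = re.search(r"total number of events:\s+(\d+)", sample)
if total_events:
    result["total_events"] = total_events.group(1)
# Latency lines become a nested dict keyed by metric name.
result["latency_ms"] = dict(
    re.findall(r"(min|avg|max|95th percentile|sum):\s+([\d.]+)", sample)
)
cpu_speed = re.search(r"events per second:\s+([\d.]+)", sample)
if cpu_speed:
    result["cpu_speed"] = cpu_speed.group(1)

print(json.dumps(result, indent=2))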
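
For reference, the JSON that process_memory_perf_result() places in res.message and that _send_subtest_msg() forwards with the sub-test result has roughly the following shape (all values invented for illustration):

# Illustrative shape of the message attached to a memory sub-test result.
example_message = {
    "total_time": "10.0001s",
    "total_events": "52428800",
    "latency_ms": {
        "min": "0.00",
        "avg": "0.00",
        "max": "0.12",
        "95th percentile": "0.00",
        "sum": "7561.23",
    },
    "thread_fairness": {
        "events_avg": "52428800.0000",
        "events_stddev": "0.00",
        "execution_time_avg": "7.5612",
        "execution_time_stddev": "0.00",
    },
    # Populated only when sysbench emits per-thread DEBUG statistics.
    "verbose_per_thread_statistics": {},
    "total_operations": 52428800,
    "operations_per_second": 5242880.0,
    "total_mib_transferred": 51200.0,
    "mib_per_second": 5120.0,
}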