diff --git a/lisa/tools/__init__.py b/lisa/tools/__init__.py index 3d77b098b6..07188c4e07 100644 --- a/lisa/tools/__init__.py +++ b/lisa/tools/__init__.py @@ -67,6 +67,7 @@ from .lsvmbus import Lsvmbus from .make import Make from .mdadm import Mdadm +from .meson import Meson from .mkdir import Mkdir from .mkfs import FileSystem, Mkfs, Mkfsext, Mkfsxfs from .modinfo import Modinfo @@ -184,6 +185,7 @@ "Lspci", "Lsvmbus", "Make", + "Meson", "Mdadm", "Mkdir", "Mkfs", diff --git a/lisa/tools/ln.py b/lisa/tools/ln.py index 143e6c6360..d950ceea61 100644 --- a/lisa/tools/ln.py +++ b/lisa/tools/ln.py @@ -13,10 +13,18 @@ def command(self) -> str: def can_install(self) -> bool: return False - def create_link(self, target: str, link: str, is_symbolic: bool = True) -> None: + def create_link( + self, + target: str, + link: str, + is_symbolic: bool = True, + force: bool = False, + ) -> None: cmd = "" if is_symbolic: cmd += " -s " + if force: + cmd += " -f " cmd += f"{target} {link}" self.run( cmd, diff --git a/lisa/tools/meson.py b/lisa/tools/meson.py new file mode 100644 index 0000000000..1fdf3d510d --- /dev/null +++ b/lisa/tools/meson.py @@ -0,0 +1,56 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from pathlib import PurePath +from typing import cast + +from semver import VersionInfo + +from lisa.executable import Tool +from lisa.operating_system import Posix + +from .ln import Ln +from .python import Pip +from .whoami import Whoami + + +class Meson(Tool): + @property + def command(self) -> str: + return "meson" + + def _check_exists(self) -> bool: + result = self.node.execute("meson --version", shell=True) + return result.exit_code == 0 and VersionInfo.parse(result.stdout) >= "0.52.0" + + @property + def can_install(self) -> bool: + return self.node.is_posix + + def _install(self) -> bool: + posix_os: Posix = cast(Posix, self.node.os) + # use pip to make sure we install a recent version + if (not posix_os.package_exists("meson")) or posix_os.get_package_information( + "meson", use_cached=False + ) < "0.52.0": + username = self.node.tools[Whoami].get_username() + self.node.tools[Pip].install_packages("meson", install_to_user=True) + # environment variables won't expand even when using shell=True :\ + self.node.tools[Ln].create_link( + f"/home/{username}/.local/bin/meson", "/usr/bin/meson", force=True + ) + + return self._check_exists() + + def setup(self, args: str, cwd: PurePath, build_dir: str = "build") -> PurePath: + self.run( + f"{args} {build_dir}", + force_run=True, + shell=True, + cwd=cwd, + expected_exit_code=0, + expected_exit_code_failure_message=( + f"Could not configure {str(cwd)} with meson using args {args}" + ), + ) + return cwd.joinpath(build_dir) diff --git a/lisa/tools/python.py b/lisa/tools/python.py index 90de8f55c8..c13cc16e5e 100644 --- a/lisa/tools/python.py +++ b/lisa/tools/python.py @@ -60,9 +60,14 @@ def _install(self) -> bool: self.node.os.install_packages(package_name) return self._check_exists() - def install_packages(self, packages_name: str, install_path: str = "") -> None: + def install_packages( + self, packages_name: str, install_path: str = "", install_to_user: bool = False + ) -> None: node = self.node - cmd_line = f"install -q {packages_name}" + if not install_to_user: + cmd_line = f"install -q {packages_name}" + else: + cmd_line = f"install --user -q {packages_name}" envs = {} if install_path != "": diff --git a/microsoft/testsuites/dpdk/common.py b/microsoft/testsuites/dpdk/common.py index aeb92dbfbc..843e27456b 100644 --- 
a/microsoft/testsuites/dpdk/common.py +++ b/microsoft/testsuites/dpdk/common.py @@ -2,12 +2,17 @@ # Licensed under the MIT license. from datetime import datetime -from typing import Any, Dict +from pathlib import PurePath +from typing import Any, Callable, Dict, List, Optional, Sequence, Type, Union from assertpy import assert_that +from semver import VersionInfo +from urllib3.util.url import parse_url from lisa import Node -from lisa.operating_system import Debian, Oracle, Redhat, Suse, Ubuntu +from lisa.executable import Tool +from lisa.operating_system import Debian, Fedora, Oracle, Posix, Redhat, Suse, Ubuntu +from lisa.tools import Git, Tar, Wget from lisa.util import UnsupportedDistroException DPDK_STABLE_GIT_REPO = "https://dpdk.org/git/dpdk-stable" @@ -17,6 +22,256 @@ AZ_ROUTE_ALL_TRAFFIC = "0.0.0.0/0" +# Attempt to clean up the DPDK package dependency mess +# Make an Installer class that implements the common steps +# for installing DPDK/rdma-core, either from source or the package manager. +# This generic class will get implemented in DpdkTestpmd and RdmaCore. +# This should help us cover the various installation cases in a nice way, +# and allow us to only re-implement the bits we need for each project. +class OsPackageDependencies: + # A class to reduce the isinstance() trees that are + # sprinkled everywhere. + # Caller provides a function to match an OS and + # the packages to install on that OS. + def __init__( + self, + os_matcher: Callable[[Posix], bool], + packages: Sequence[Union[str, Tool, Type[Tool]]], + ) -> None: + self.check_node_os = os_matcher + self.packages = packages + + +class DependencyInstaller: + # provide a list of OsPackageDependencies for a project + def __init__(self, requirements: List[OsPackageDependencies]) -> None: + self.requirements = requirements + + # evaluate the list of package dependencies and install the first match + def install_required_packages( + self, node: Node, extra_args: Union[List[str], None] + ) -> None: + os = node.os + if not isinstance(os, Posix): + return + # find the first match for an OS, install the packages. + # Most specific should go first, most generic should go last. + for os_package_check in self.requirements: + if os_package_check.check_node_os(os): + os.install_packages(os_package_check.packages, extra_args=extra_args) + return + raise UnsupportedDistroException( + os, + message=("Installer did not define dependencies for this os."), + ) + + +class Installer: + # Generic 'Installer' parent class for DpdkTestpmd/rdma-core + # NOTE: This should not be instantiated directly. + _err_msg = "not implemented for this installation type." + + # setup the node before starting + # ex: updating the kernel, enabling features, checking drivers, etc. + def _setup_node(self) -> None: + raise NotImplementedError(f"_setup_node {self._err_msg}") + + # check if the package is already installed: + # Is the package installed from source? Or from the package manager? + # Does the version match the one we want if we need a specific one?
+ def _check_if_installed(self) -> bool: + raise NotImplementedError(f"_check_if_installed {self._err_msg}") + + # setup the installation (install Ninja, Meson, etc) + def _setup_installation(self) -> None: + raise NotImplementedError(f"_setup_installation {self._err_msg}") + + # do the build and installation + def _run_installation(self) -> None: + raise NotImplementedError(f"_run_installation {self._err_msg}") + + # remove an installation + def _clean_previous_installation(self) -> None: + raise NotImplementedError(f"_clean_previous_installation {self._err_msg}") + + # provide an opportunity to check tags, fetch a subproject, + # modify the config, etc. + def _configure_installation(self) -> None: + raise NotImplementedError(f"_configure_installation {self._err_msg}") + + # install the dependencies + def _install_dependencies(self) -> None: + if self._os_dependencies is not None: + self._os_dependencies.install_required_packages( + self._node, extra_args=self._package_manager_extra_args + ) + + # define how to check the installed version + def get_installed_version(self) -> VersionInfo: + raise NotImplementedError(f"get_installed_version {self._err_msg}") + + # run the defined setup and installation steps. + def do_installation(self, required_version: Optional[VersionInfo] = None) -> None: + self._setup_node() + if self._check_if_installed() and required_version is not None: + if self.get_installed_version() == required_version: + return + else: + self._clean_previous_installation() + self._install_dependencies() + self._setup_installation() + self._configure_installation() + self._run_installation() + + def __init__( + self, node: Node, os_dependencies: Optional[DependencyInstaller] = None + ) -> None: + self._node = node + if not isinstance(self._node.os, Posix): + raise UnsupportedDistroException( + self._node.os, "Installer parent class requires Posix OS." + ) + self._os: Posix = self._node.os + self._package_manager_extra_args: List[str] = [] + self._os_dependencies = os_dependencies + + +# Base class for package manager installation +class PackageManagerInstall(Installer): + def __init__(self, node: Node, os_dependencies: DependencyInstaller) -> None: + super().__init__(node, os_dependencies) + + # define no-ops for source setup functions + def _setup_installation(self) -> None: + return + + def _configure_installation(self) -> None: + return + + # uninstall from the package manager + def _clean_previous_installation(self) -> None: + if not isinstance(self._os, Posix): + return + if self._os_dependencies is not None: + for os_package_check in self._os_dependencies.requirements: + if os_package_check.check_node_os(self._os): + self._os.uninstall_packages(os_package_check.packages) + + # verify packages on the node have been installed by + # the package manager + def _check_if_installed(self) -> bool: + if not isinstance(self._os, Posix): + return False + # WARNING: Don't use this for long lists of packages. + # For dpdk, pkg-manager install is only for 'dpdk' and 'dpdk-dev' + # This will take too long if it's more than a few packages. 
+ if self._os_dependencies is not None: + for os_package_check in self._os_dependencies.requirements: + if os_package_check.check_node_os(self._os): + for pkg in os_package_check.packages: + if not self._os.package_exists(pkg): + return False + return True + + # installing dependencies is the installation in this case, so just return + def _run_installation(self) -> None: + return + + +# base class for source installs +class SourceInstall(Installer): + def __init__( + self, + node: Node, + os_dependencies: DependencyInstaller, + ) -> None: + super().__init__(node, os_dependencies) + self._checkout_path = self._node.get_working_path() + + +class GitInstall(SourceInstall): + git_repo: str = "" + git_ref: str = "" + + def __init__( + self, + node: Node, + os_dependencies: DependencyInstaller, + git_repo: str, + git_ref: str, + ) -> None: + super().__init__(node, os_dependencies) + self.git_repo = git_repo + self.git_ref = git_ref + # initialize this as a purepath. + + # define a no-op by default, children can override if needed + def _configure_installation(self) -> None: + return + + # checkout the git repository into the working path + def _setup_installation(self) -> None: + # NOTE: fail on exists is set to True. + # The expectation is that the parent Installer class should + # remove any lingering installations + self._source_path = self._node.tools[Git].clone( + self.git_repo, + cwd=self._checkout_path, + ref=self.git_ref, + fail_on_exists=True, + ) + + +# parent class for tarball source installations +class TarInstall(SourceInstall): + def __init__( + self, + node: Node, + os_dependencies: DependencyInstaller, + tar_url: str, + ) -> None: + super().__init__(node, os_dependencies) + self._tar_url = tar_url + self._is_remote_tarball = tar_url.startswith("https://") + + # no-op by default, children can override + def _configure_installation(self) -> None: + return + + # fetch the tarball (or copy it to the node) + # then extract it + def _setup_installation(self) -> None: + node = self._node + if self._is_remote_tarball: + tarfile = node.tools[Wget].get( + self._tar_url, + file_path=str(self._checkout_path), + overwrite=False, + force_run=True, + ) + remote_path = node.get_pure_path(tarfile) + self.tar_filename = remote_path.name + else: + self.tar_filename = PurePath(self._tar_url).name + remote_path = self._checkout_path.joinpath(self.tar_filename) + node.shell.copy( + local_path=PurePath(self._tar_url), + node_path=remote_path, + ) + # get rid of suffixes like .tar[.gz] + for suffix in [".tar.gz", ".tar.bz2", ".tar"]: + if self.tar_filename.endswith(suffix): + unpacked_filename = self.tar_filename[: -len(suffix)] + self._source_path = self._checkout_path.joinpath(unpacked_filename) + break + # unpack into the dest dir + node.tools[Tar].extract( + file=str(remote_path), + dest_dir=str(self._checkout_path), + gzip=True, + ) + + def force_dpdk_default_source(variables: Dict[str, Any]) -> None: if not variables.get("dpdk_source", None): variables["dpdk_source"] = DPDK_STABLE_GIT_REPO @@ -89,8 +344,39 @@ def check_dpdk_support(node: Node) -> None: raise UnsupportedDistroException( node.os, "This OS is not supported by the DPDK test suite for Azure." ) - + # verify MANA driver is available for the kernel version + if ( + isinstance(node.os, (Debian, Fedora, Suse)) + and node.nics.is_mana_device_present() + ): + # NOTE: Kernel backport examples are available for lower kernels. + # HOWEVER: these are not suitable for general testing and should be installed + # in the image _before_ starting the test.
+ # ex: make a SIG image first using the kernel build transformer. + if node.os.get_kernel_information().version < "5.15.0": + raise UnsupportedDistroException( + node.os, "MANA driver is not available for kernel < 5.15" + ) if not supported: raise UnsupportedDistroException( node.os, "This OS version is EOL and is not supported for DPDK on Azure" ) + + +def is_url_for_tarball(url: str) -> bool: + return ".tar" in PurePath(url).suffixes + + +def is_url_for_git_repo(url: str) -> bool: + parsed_url = parse_url(url) + scheme = parsed_url.scheme + path = parsed_url.path + if not (scheme and path): + return False + # investigate the rest of the URL as a path + path_check = PurePath(path) + check_for_git_https = scheme in ["http", "https"] and ( + path_check.suffixes == [".git"] + or any([x in path_check.parts for x in ["git", "_git"]]) + ) + return scheme == "git" or check_for_git_https diff --git a/microsoft/testsuites/dpdk/dpdksuite.py b/microsoft/testsuites/dpdk/dpdksuite.py index 451dea3640..2340656bfa 100644 --- a/microsoft/testsuites/dpdk/dpdksuite.py +++ b/microsoft/testsuites/dpdk/dpdksuite.py @@ -620,7 +620,10 @@ def verify_dpdk_send_receive_multi_txrx_queue_failsafe( ), ) def verify_dpdk_send_receive_multi_txrx_queue_netvsc( - self, environment: Environment, log: Logger, variables: Dict[str, Any] + self, + environment: Environment, + log: Logger, + variables: Dict[str, Any], ) -> None: try: verify_dpdk_send_receive_multi_txrx_queue( diff --git a/microsoft/testsuites/dpdk/dpdktestpmd.py b/microsoft/testsuites/dpdk/dpdktestpmd.py index a1d29d08b5..a223a1bcf0 100644 --- a/microsoft/testsuites/dpdk/dpdktestpmd.py +++ b/microsoft/testsuites/dpdk/dpdktestpmd.py @@ -2,17 +2,18 @@ # Licensed under the MIT license. import re -from pathlib import PurePosixPath +from pathlib import PurePath, PurePosixPath from typing import Any, List, Tuple, Type, Union from assertpy import assert_that, fail from semver import VersionInfo -from lisa.base_tools import Mv from lisa.executable import ExecutableResult, Tool from lisa.nic import NicInfo +from lisa.node import Node from lisa.operating_system import Debian, Fedora, Suse, Ubuntu from lisa.tools import ( + Cp, Echo, Git, KernelConfig, @@ -20,29 +21,291 @@ Lscpu, Lspci, Make, + Meson, Modprobe, Pidof, Pkgconfig, - Rm, - Service, Tar, Timeout, Wget, ) -from lisa.util import ( - LisaException, - MissingPackagesException, - SkippedException, - UnsupportedDistroException, -) +from lisa.util import LisaException, SkippedException, UnsupportedDistroException from lisa.util.constants import DEVICE_TYPE_SRIOV, SIGINT from microsoft.testsuites.dpdk.common import ( + DependencyInstaller, + GitInstall, + Installer, + OsPackageDependencies, + PackageManagerInstall, + SourceInstall, + TarInstall, is_ubuntu_latest_or_prerelease, is_ubuntu_lts_version, + is_url_for_git_repo, + is_url_for_tarball, ) PACKAGE_MANAGER_SOURCE = "package_manager" +# declare package dependencies for package manager DPDK installation +DPDK_PACKAGE_MANAGER_PACKAGES = DependencyInstaller( + requirements=[ + OsPackageDependencies( + os_matcher=lambda x: isinstance(x, Debian), packages=["dpdk", "dpdk-dev"] + ), + OsPackageDependencies( + os_matcher=lambda x: isinstance(x, Suse) + and float(x.information.release) == 15.5, + packages=["dpdk22", "dpdk22-devel"], + ), + OsPackageDependencies( + os_matcher=lambda x: isinstance(x, (Fedora, Suse)), + packages=["dpdk", "dpdk-devel"], + ), + ] +) +# declare package/tool dependencies for DPDK source installation +DPDK_SOURCE_INSTALL_PACKAGES = 
DependencyInstaller( + requirements=[ + OsPackageDependencies( + os_matcher=lambda x: isinstance(x, Ubuntu) + and "bionic" in x.information.codename, + packages=[ + Meson, + "build-essential", + "libmnl-dev", + "libelf-dev", + "libnuma-dev", + "dpkg-dev", + "pkg-config", + "python3-pip", + "python3-pyelftools", + "python-pyelftools", + # 18.04 doesn't need linux-modules-extra-azure + # since it will never have MANA support + ], + ), + OsPackageDependencies( + os_matcher=lambda x: isinstance(x, Debian), + packages=[ + Meson, + "build-essential", + "libnuma-dev", + "libmnl-dev", + "ninja-build", + "python3-pyelftools", + "libelf-dev", + "pkg-config", + "linux-modules-extra-azure", + ], + ), + OsPackageDependencies( + os_matcher=lambda x: isinstance(x, Suse), + packages=[ + Meson, + "psmisc", + "libnuma-devel", + "numactl", + "libmnl-devel meson", + "gcc-c++", + ], + ), + OsPackageDependencies( + os_matcher=lambda x: isinstance(x, (Fedora)), + packages=[ + Meson, + "psmisc", + "numactl-devel", + "pkgconfig", + "elfutils-libelf-devel", + "python3-pip", + "kernel-modules-extra", + "kernel-headers", + "gcc-c++", + ], + ), + ] +) + + +def get_debian_backport_repo_args(os: Debian) -> List[str]: + if not isinstance(os, Debian): + return [] + if isinstance(os, Ubuntu) and ( + is_ubuntu_latest_or_prerelease(os) or not is_ubuntu_lts_version(os) + ): + return [] + repos = os.get_repositories() + backport_repo = f"{os.information.codename}-backports" + if any([backport_repo in repo.name for repo in repos]): + return [f"-t {backport_repo}"] + return [] + + +class DpdkPackageManagerInstall(PackageManagerInstall): + def _setup_node(self) -> None: + if isinstance(self._os, Debian): + self._package_manager_extra_args = get_debian_backport_repo_args(self._os) + if self._os.information.version < "22.4.0": + self._os.update_packages("linux-azure") + self._node.reboot() + + def get_installed_version(self) -> VersionInfo: + return self._os.get_package_information("dpdk", use_cached=False) + + def _check_if_installed(self) -> bool: + return self._os.package_exists("dpdk") + + +# implement SourceInstall for DPDK +class DpdkSourceInstall(SourceInstall): + _dpdk_devname_repo = "https://www.github.com/mcgov/devname.git" + _sample_applications = [ + "l3fwd", + "multi_process/client_server_mp/mp_server", + "multi_process/client_server_mp/mp_client", + "devname", + ] + + def _check_if_installed(self) -> bool: + try: + package_manager_install = self._os.package_exists("dpdk") + # _get_installed_version for source install throws + # if package is not found. So we don't need the result, + # if the function doesn't throw, the version was found. 
+ _ = self.get_installed_version() + # this becomes '(not package manager installed) and + # _get_installed_version() doesn't throw' + return not package_manager_install + except AssertionError: + # _get_installed_version threw an AssertionError + # so PkgConfig info was not found + return False + + def _setup_node(self) -> None: + self._source_path = self._node.get_working_path().joinpath("dpdk") + + if isinstance(self._os, Debian): + self._package_manager_extra_args = get_debian_backport_repo_args(self._os) + if self._os.information.version < "22.4.0": + self._os.update_packages("linux-azure") + self._node.reboot() + + def _clean_previous_installation(self) -> None: + # undo source installation (thanks ninja) + self._node.execute( + "ninja uninstall", shell=True, sudo=True, cwd=self.dpdk_build_path + ) + # remove dpdk-devname + self._node.execute("rm -f /usr/local/bin/dpdk-devname", sudo=True, shell=True) + source_path = str(self._source_path) + working_path = str(self._node.get_working_path()) + assert_that(str(source_path)).described_as( + "DPDK Installer source path was empty during attempted cleanup!" + ).is_not_empty() + assert_that(str(source_path)).described_as( + "DPDK Installer source path was set to root dir " + "'/' during attempted cleanup!" + ).is_not_equal_to("/") + assert_that(str(source_path)).described_as( + f"DPDK Installer source path {source_path} was set to " + f"working path '{working_path}' during attempted cleanup!" + ).is_not_equal_to(working_path) + # remove source code directory + self._node.execute(f"rm -rf {str(source_path)}", shell=True) + + def _configure_installation(self) -> None: + git = self._node.tools[Git] + # clone devname repository into dpdk/examples + # allows us to build the small port identifier app + git.clone( + self._dpdk_devname_repo, + cwd=self._source_path.joinpath("examples"), + fail_on_exists=True, + ) + + def get_installed_version(self) -> VersionInfo: + return self._node.tools[Pkgconfig].get_package_version( + "libdpdk", update_cached=True + ) + + def _run_installation(self) -> None: + # check out devname into dpdk/examples (don't need to save the dir) + if self._sample_applications: + sample_apps = f"-Dexamples={','.join(self._sample_applications)}" + else: + sample_apps = "" + node = self._node + # + self.dpdk_build_path = node.tools[Meson].setup( + args=sample_apps, build_dir="build", cwd=self._source_path + ) + node.execute( + "ninja", + cwd=self.dpdk_build_path, + timeout=1800, + expected_exit_code=0, + expected_exit_code_failure_message=( + "ninja build for dpdk failed. check build spew for missing headers " + "or dependencies. Also check that this ninja version requirement " + "has not changed for dpdk." + ), + ) + node.execute( + "ninja install", + cwd=self.dpdk_build_path, + sudo=True, + expected_exit_code=0, + expected_exit_code_failure_message=( + "ninja install failed for dpdk binaries." 
+ ), + ) + node.execute( + "ldconfig", + cwd=self.dpdk_build_path, + sudo=True, + expected_exit_code=0, + expected_exit_code_failure_message="ldconfig failed, check for error spew.", + ) + library_bashrc_lines = [ + "export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}:/usr/local/lib64/pkgconfig/", + "export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib64/", + ] + node.tools[Echo].write_to_file( + ";".join(library_bashrc_lines), + node.get_pure_path("$HOME/.bashrc"), + append=True, + ) + node.tools[Cp].copy( + self.dpdk_build_path.joinpath("examples/dpdk-devname"), + dest=node.get_pure_path("/usr/local/bin"), + sudo=True, + ) + + +class DpdkGitInstall(DpdkSourceInstall, GitInstall): + def __init__( + self, + node: Node, + os_dependencies: DependencyInstaller, + git_repo: str, + git_ref: str, + ) -> None: + super().__init__(node, os_dependencies, git_repo, git_ref) + + def _configure_installation(self) -> None: + if not self.git_ref: + git = self._node.tools[Git] + self.git_ref = git.get_tag( + self._source_path, filter_=r"^v.*" # starts w 'v' + ) + git.checkout(self.git_ref, cwd=self._source_path) + return super()._configure_installation() + + +class DpdkTarInstall(DpdkSourceInstall, TarInstall): + ... + class DpdkTestpmd(Tool): # TestPMD tool to bundle the DPDK build and toolset together. @@ -203,10 +466,17 @@ def generate_testpmd_include( # include flag changed to 'allowlist' in 20.11 # use 'allow' instead of 'deny' for envionments where # there is 1 shared bus address (MANA) - if self._dpdk_version_info and self._dpdk_version_info < "20.11.0": - include_flag = "-w" - else: + # NOTE: I keep running into weird special cases of this. + # 21.11 on ubuntu has -a even though 20.11+ shouldn't... + help_output = self.node.execute( + f"{self.command} --help", no_debug_log=True, no_info_log=True + ) + allow_flag = "-a, --allow" in (help_output.stderr + help_output.stdout) + if allow_flag: include_flag = "-a" + else: + include_flag = "-w" + include_flag = f' {include_flag} "{node_nic.pci_slot}"' # build pmd argument @@ -331,7 +601,10 @@ def generate_testpmd_command( assert_that(max_core_index).described_as( "Test needs at least 1 core for servicing and one core for forwarding" ).is_greater_than(0) - + assert_that(self._testpmd_install_path).described_as( + "Testpmd install path was not set, this indicates a logic" + " error in the DPDK installation process." 
+ ).is_not_empty() return ( f"{self._testpmd_install_path} {core_list} " f"{nic_include_info} -- --forward-mode={mode} " @@ -470,14 +743,44 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self._sample_apps_to_build = kwargs.pop("sample_apps", []) self._dpdk_version_info = VersionInfo(0, 0) self._testpmd_install_path: str = "" - if not self.use_package_manager_install(): + self.dpdk_build_path: PurePath = PurePath("") + self._expected_install_path = "" + self._determine_network_hardware() + if self.use_package_manager_install(): + self.installer: Installer = DpdkPackageManagerInstall( + self.node, DPDK_PACKAGE_MANAGER_PACKAGES + ) + # if not package manager, choose source installation + else: self._dpdk_repo_path_name = "dpdk" + self._expected_install_path = "/usr/local/bin" + work_path = self.node.get_working_path_with_required_space(5) self.current_work_path = self.node.get_pure_path(work_path) self.dpdk_path = self.node.get_pure_path(work_path).joinpath( self._dpdk_repo_path_name ) - self._determine_network_hardware() + if is_url_for_git_repo(self._dpdk_source): + self.installer = DpdkGitInstall( + node=self.node, + os_dependencies=DPDK_SOURCE_INSTALL_PACKAGES, + git_repo=self._dpdk_source, + git_ref=self._dpdk_branch, + ) + elif is_url_for_tarball(self._dpdk_source): + self.installer = DpdkTarInstall( + node=self.node, + os_dependencies=DPDK_SOURCE_INSTALL_PACKAGES, + tar_url=self._dpdk_source, + ) + else: + fail( + "URL provided for dpdk source did not validate as " + f"a tarball or git repo. Found {self._dpdk_source} " + " Expected https://___/___.git or /path/to/tar.tar[.gz] or " + "https://__/__.tar[.gz]" + ) + # if dpdk is already installed, find the binary and check the version if self.find_testpmd_binary(assert_on_fail=False): pkgconfig = self.node.tools[Pkgconfig] @@ -555,13 +858,13 @@ def _install_upstream_rdma_core_for_mana(self) -> None: tar_path = wget.get( url=( "https://github.com/linux-rdma/rdma-core/" - "releases/download/v46.0/rdma-core-46.0.tar.gz" + "releases/download/v50.1/rdma-core-50.1.tar.gz" ), file_path=str(node.working_path), ) tar.extract(tar_path, dest_dir=str(node.working_path), gzip=True, sudo=True) - source_path = node.working_path.joinpath("rdma-core-46.0") + source_path = node.working_path.joinpath("rdma-core-50.1") node.execute( "cmake -DIN_PLACE=0 -DNO_MAN_PAGES=1 -DCMAKE_INSTALL_PREFIX=/usr", shell=True, @@ -570,199 +873,29 @@ def _install_upstream_rdma_core_for_mana(self) -> None: ) make.make_install(source_path) - def _set_backport_repo_args(self) -> None: - distro = self.node.os - # skip attempting to use backports for latest/prerlease - # and non-lts ubuntu versions - if isinstance(distro, Ubuntu) and ( - is_ubuntu_latest_or_prerelease(distro) or not is_ubuntu_lts_version(distro) - ): - self._backport_repo_args = [] - # otherwise check if a backport repo exists use it if so - elif isinstance(distro, Debian): - repos = distro.get_repositories() - backport_repo = f"{distro.information.codename}-backports" - if any([backport_repo in repo.name for repo in repos]): - self._backport_repo_args = [f"-t {backport_repo}"] - else: - self._backport_repo_args = [] - # otherwise assume backports are included in default repos - # TODO: check for how RHEL and SUSE manage backports - else: - self._backport_repo_args = [] - def _install(self) -> bool: self._testpmd_output_after_reenable = "" self._testpmd_output_before_rescind = "" self._testpmd_output_during_rescind = "" self._last_run_output = "" node = self.node - # before doing anything: determine if 
backport repo needs to be enabled - self._set_backport_repo_args() - - if self.has_dpdk_version(): - # DPDK is already installed - node.log.info( - "DPDK was installed from source previously, using existing DPDK." - ) - self._load_drivers_for_dpdk() - return True - - # otherwise, install from package manager, git, or tar - - self._install_dependencies() - - # if this is mana VM, we need an upstream rdma-core package (for now) + if not isinstance(node.os, (Debian, Fedora, Suse)): + return False + if isinstance(node.os, Ubuntu) and node.os.information.codename == "bionic": + # bionic needs to update to latest first + node.os.update_packages("") if self.is_mana: if not (isinstance(node.os, Ubuntu) or isinstance(node.os, Fedora)): raise SkippedException("MANA DPDK test is not supported on this OS") - # ensure no older dependency is installed node.os.uninstall_packages("rdma-core") self._install_upstream_rdma_core_for_mana() - - # installing from distro package manager - if self.use_package_manager_install(): - self.node.log.info( - "Installing dpdk and dev package from package manager..." - ) - if isinstance(node.os, Debian): - node.os.install_packages( - ["dpdk", "dpdk-dev"], - extra_args=self._backport_repo_args, - ) - elif ( - isinstance(node.os, Suse) and float(node.os.information.release) == 15.5 - ): - node.os.install_packages(["dpdk22", "dpdk22-devel"]) - elif isinstance(node.os, (Fedora, Suse)): - node.os.install_packages(["dpdk", "dpdk-devel"]) - else: - raise NotImplementedError( - "Dpdk package names are missing in dpdktestpmd.install" - f" for os {node.os.name}" - ) - self.node.log.info( - f"Installed DPDK version {str(self._dpdk_version_info)} " - "from package manager" - ) - if isinstance(node.os, Suse) and float(node.os.information.release) == 15.5: - self._dpdk_version_info = node.os.get_package_information("dpdk22") - else: - self._dpdk_version_info = node.os.get_package_information("dpdk") - self.find_testpmd_binary() - self._load_drivers_for_dpdk() - return True - - # otherwise install from source tarball or git - self.node.log.info(f"Installing dpdk from source: {self._dpdk_source}") - - if self.find_testpmd_binary( - assert_on_fail=False, check_path="/usr/local/bin" - ): # tools are already installed - # version info must already be set from __init__ - return True - - git_tool = node.tools[Git] - echo_tool = node.tools[Echo] - - if self._dpdk_source and self._dpdk_source.endswith(".tar.gz"): - wget_tool = node.tools[Wget] - tar_tool = node.tools[Tar] - if self._dpdk_branch: - node.log.warn( - ( - "DPDK tarball source does not need dpdk_branch defined. " - "User-defined variable dpdk_branch will be ignored." - ) - ) - wget_tool.get( - self._dpdk_source, - str(self.current_work_path), - ) - dpdk_filename = self._dpdk_source.split("/")[-1] - # extract tar into dpdk/ folder and discard old root folder name - tar_tool.extract( - str(self.current_work_path.joinpath(dpdk_filename)), - str(self.dpdk_path), - strip_components=1, - ) else: - git_tool.clone( - self._dpdk_source, - cwd=self.current_work_path, - dir_name=self._dpdk_repo_path_name, - ) - if not self._dpdk_branch: - # dpdk stopped using a default branch - # if a branch is not specified, get latest version tag. 
- self._dpdk_branch = git_tool.get_tag( - self.dpdk_path, filter_=r"^v.*" # starts w 'v' - ) - - git_tool.checkout(self._dpdk_branch, cwd=self.dpdk_path) - + node.os.install_packages(self.get_rdma_core_package_name()) + self.installer.do_installation() + self._dpdk_version_info = self.installer.get_installed_version() self._load_drivers_for_dpdk() - - # add sample apps to compilation if they are present - if self._sample_apps_to_build: - sample_apps = f"-Dexamples={','.join(self._sample_apps_to_build)}" - else: - sample_apps = "" - - node.execute( - f"meson {sample_apps} build", - shell=True, - cwd=self.dpdk_path, - expected_exit_code=0, - expected_exit_code_failure_message=( - "meson build for dpdk failed, check that" - "dpdk build has not changed to eliminate the use of meson or " - "meson version is compatible with this dpdk version and OS." - ), - ) - self.dpdk_build_path = self.dpdk_path.joinpath("build") - node.execute( - "ninja", - cwd=self.dpdk_build_path, - timeout=1800, - expected_exit_code=0, - expected_exit_code_failure_message=( - "ninja build for dpdk failed. check build spew for missing headers " - "or dependencies. Also check that this ninja version requirement " - "has not changed for dpdk." - ), - ) - node.execute( - "ninja install", - cwd=self.dpdk_build_path, - sudo=True, - expected_exit_code=0, - expected_exit_code_failure_message=( - "ninja install failed for dpdk binaries." - ), - ) - node.execute( - "ldconfig", - cwd=self.dpdk_build_path, - sudo=True, - expected_exit_code=0, - expected_exit_code_failure_message="ldconfig failed, check for error spew.", - ) - library_bashrc_lines = [ - "export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}:/usr/local/lib64/pkgconfig/", - "export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib64/", - ] - echo_tool.write_to_file( - ";".join(library_bashrc_lines), - node.get_pure_path("~/.bashrc"), - append=True, - ) - - self.find_testpmd_binary(check_path="/usr/local/bin") - self._dpdk_version_info = self.node.tools[Pkgconfig].get_package_version( - self._dpdk_lib_name, update_cached=True - ) + self.find_testpmd_binary(check_path=self._expected_install_path) return True def _load_drivers_for_dpdk(self) -> None: @@ -838,193 +971,6 @@ def _load_drivers_for_dpdk(self) -> None: if network_drivers: modprobe.load(network_drivers) - def _install_dependencies(self) -> None: - node = self.node - if isinstance(node.os, Ubuntu): - self._install_ubuntu_dependencies() - elif isinstance(node.os, Debian): - node.os.install_packages( - self._debian_packages, extra_args=self._backport_repo_args - ) - elif isinstance(node.os, Fedora): - self._install_fedora_dependencies() - elif isinstance(node.os, Suse): - self._install_suse_dependencies() - else: - raise UnsupportedDistroException( - node.os, "This OS does not have dpdk installation implemented yet." - ) - - def _install_suse_dependencies(self) -> None: - node = self.node - suse = node.os - if not isinstance(suse, Suse): - fail( - "_install_suse_dependencies was called on node " - f"which was not suse: {node.os.information.full_version}" - ) - return # appease the type checker - if suse.information.version < "15.0.0": - raise SkippedException( - f"Suse {str(suse.information.version)} is not supported. 
" - "Minimum documented version for DPDK support is >= SLES15" - ) - else: - suse.install_packages(self._suse_packages) - if not self.use_package_manager_install(): - self._install_ninja_and_meson() - rdma_core_packages = self.get_rdma_core_package_name() - if rdma_core_packages: - suse.install_packages(rdma_core_packages.split()) - - def _install_ubuntu_dependencies(self) -> None: - node = self.node - ubuntu = node.os - if not isinstance(ubuntu, Ubuntu): - fail( - "_install_ubuntu_dependencies was called on node " - f"which was not Ubuntu: {node.os.information.full_version}" - ) - return # appease the type checker - - # apply update to latest first - ubuntu.update_packages("linux-azure") - node.reboot() - if ubuntu.information.version < "18.4.0": - raise SkippedException( - f"Ubuntu {str(ubuntu.information.version)} is not supported. " - "Minimum documented version for DPDK support is >=18.04" - ) - elif ubuntu.information.version < "20.4.0": - ubuntu.install_packages( - self._ubuntu_packages_1804, - extra_args=self._backport_repo_args, - ) - if not self.use_package_manager_install(): - self._install_ninja_and_meson() - else: - ubuntu.install_packages( - self._ubuntu_packages_2004, - extra_args=self._backport_repo_args, - ) - # MANA tests use linux-modules-extra-azure, install if it's available. - if self.is_mana and ubuntu.is_package_in_repo("linux-modules-extra-azure"): - ubuntu.install_packages("linux-modules-extra-azure") - rdma_core_packages = self.get_rdma_core_package_name() - if rdma_core_packages: - ubuntu.install_packages(rdma_core_packages.split()) - - def _install_fedora_dependencies(self) -> None: - node = self.node - rhel = node.os - if not isinstance(rhel, Fedora): - fail( - "_install_fedora_dependencies was called on node " - f"which was not Fedora: {node.os.information.full_version}" - ) - return # appease the type checker - - # DPDK is very sensitive to rdma-core/kernel mismatches - # update to latest kernel before instaling dependencies - rhel.install_packages("kernel") - node.reboot() - - if rhel.information.version.major == 7: - # Add packages for rhel7 - rhel.install_packages(["libmnl-devel", "libbpf-devel"]) - - try: - rhel.install_packages("kernel-devel-$(uname -r)") - except MissingPackagesException: - node.log.debug("kernel-devel-$(uname -r) not found. Trying kernel-devel") - rhel.install_packages("kernel-devel") - - # RHEL 8 doesn't require special cases for installed packages. - # TODO: RHEL9 may require updates upon release - rdma_core_packages = self.get_rdma_core_package_name() - if rdma_core_packages: - self._fedora_packages += rdma_core_packages.split() - rhel.group_install_packages("Infiniband Support") - - rhel.group_install_packages("Development Tools") - rhel.install_packages(self._fedora_packages) - - # ensure RDMA service is started if present. - - service_name = "rdma" - service = node.tools[Service] - if service.check_service_exists(service_name): - if not service.check_service_status(service_name): - service.enable_service(service_name) - - # some versions of RHEL and CentOS have service.rdma - # that will refuse manual start/stop and will return - # NOPERMISSION. This is not fatal and can be continued. - # If the service is present it should start when needed. 
- service.restart_service( - service_name, ignore_exit_code=service.SYSTEMD_EXIT_NOPERMISSION - ) - - if not self.use_package_manager_install(): - self._install_ninja_and_meson() - - def _install_ninja_and_meson(self) -> None: - node = self.node - - node.execute( - "pip3 install --upgrade meson", - sudo=True, - expected_exit_code=0, - expected_exit_code_failure_message=( - "Failed to update Meson to latest version with pip3" - ), - ) - # after upgrade meson - # if meson is in /usr/local/bin, link it - # if meson is in /usr/bin, do nothing, upgrade will overwrite it - if node.shell.exists(node.get_pure_path("/usr/local/bin/meson")): - node.tools[Rm].remove_file("/usr/bin/meson", sudo=True) - node.execute( - "ln -fs /usr/local/bin/meson /usr/bin/meson", - sudo=True, - expected_exit_code=0, - expected_exit_code_failure_message=( - "Failed to link new meson version as the default " - "version in /usr/bin" - ), - ) - - # NOTE: finding latest ninja is a pain, - # so just fetch latest from github here - git_tool = self.node.tools[Git] - git_tool.clone( - self._ninja_url, - cwd=node.working_path, - ) - node.execute( - "./configure.py --bootstrap", - cwd=node.get_pure_path(f"{node.working_path}/ninja"), - expected_exit_code=0, - expected_exit_code_failure_message=( - "Failed to run ./configure.py --bootstrap" - ), - ) - node.tools[Mv].move( - f"{node.working_path}/ninja/ninja", - "/usr/bin/ninja", - overwrite=True, - sudo=True, - ) - - node.execute( - "pip3 install --upgrade pyelftools", - sudo=True, - expected_exit_code=0, - expected_exit_code_failure_message=( - "Could not upgrade pyelftools with pip3." - ), - ) - def find_testpmd_binary( self, check_path: str = "", assert_on_fail: bool = True ) -> bool: diff --git a/microsoft/testsuites/dpdk/dpdkutil.py b/microsoft/testsuites/dpdk/dpdkutil.py index 67073df1f7..984835f852 100644 --- a/microsoft/testsuites/dpdk/dpdkutil.py +++ b/microsoft/testsuites/dpdk/dpdkutil.py @@ -564,7 +564,11 @@ def _parallel_cleanup(node: Node) -> None: if not interface.is_enabled_sriov(): interface.switch_sriov(enable=True, wait=False, reset_connections=True) # cleanup temporary hugepage and driver changes - node.reboot() + try: + node.reboot(time_out=60) + except LisaException: + node.log.debug("Timeout during cleanup reboot. Marking node for deletion.") + node.mark_dirty() run_in_parallel( [partial(_parallel_cleanup, node) for node in environment.nodes.list()]
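
Reviewer note (not part of the patch): below is a minimal sketch of how the new installer abstractions in microsoft/testsuites/dpdk/common.py are meant to compose, mirroring the selection logic this change adds to DpdkTestpmd.__init__. The helper name pick_dpdk_installer and the example values are hypothetical; the class and function names follow this diff.

# Minimal sketch, assuming the classes behave as defined in this diff.
# pick_dpdk_installer() is a hypothetical helper, not part of the change.
from lisa import Node
from microsoft.testsuites.dpdk.common import (
    Installer,
    is_url_for_git_repo,
    is_url_for_tarball,
)
from microsoft.testsuites.dpdk.dpdktestpmd import (
    DPDK_SOURCE_INSTALL_PACKAGES,
    DpdkGitInstall,
    DpdkTarInstall,
)


def pick_dpdk_installer(node: Node, dpdk_source: str, dpdk_ref: str = "") -> Installer:
    # choose a source installer from the URL shape, as DpdkTestpmd.__init__ does
    if is_url_for_git_repo(dpdk_source):
        return DpdkGitInstall(
            node=node,
            os_dependencies=DPDK_SOURCE_INSTALL_PACKAGES,
            git_repo=dpdk_source,
            git_ref=dpdk_ref,
        )
    if is_url_for_tarball(dpdk_source):
        return DpdkTarInstall(
            node=node,
            os_dependencies=DPDK_SOURCE_INSTALL_PACKAGES,
            tar_url=dpdk_source,
        )
    raise ValueError(f"{dpdk_source} does not look like a git repo or tarball URL")


# installer.do_installation() then runs _setup_node, checks for an existing
# install (cleaning it when a required_version is given and does not match),
# installs the OS package dependencies, and runs the checkout/configure/build steps.

This keeps the per-distro package lists declarative (OsPackageDependencies) while the build steps stay reusable across package-manager, git, and tarball sources.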