From d6d19313bd46ff2e04e4a7074d5f648303eee18f Mon Sep 17 00:00:00 2001 From: martyav Date: Fri, 13 Sep 2024 15:27:38 -0400 Subject: [PATCH 01/31] Fix 'title out of sequence' errors fixed Dockershim.md --- docs/faq/dockershim.md | 2 +- versioned_docs/version-2.6/faq/dockershim.md | 2 +- versioned_docs/version-2.7/faq/dockershim.md | 2 +- versioned_docs/version-2.8/faq/dockershim.md | 2 +- versioned_docs/version-2.9/faq/dockershim.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/faq/dockershim.md b/docs/faq/dockershim.md index 4e710c9746b2..0edaa23b6aa6 100644 --- a/docs/faq/dockershim.md +++ b/docs/faq/dockershim.md @@ -18,7 +18,7 @@ enable_cri_dockerd: true For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher even after the removal of in-tree Dockershim in Kubernetes 1.24. -### FAQ +## FAQ
diff --git a/versioned_docs/version-2.6/faq/dockershim.md b/versioned_docs/version-2.6/faq/dockershim.md index 4e710c9746b2..0edaa23b6aa6 100644 --- a/versioned_docs/version-2.6/faq/dockershim.md +++ b/versioned_docs/version-2.6/faq/dockershim.md @@ -18,7 +18,7 @@ enable_cri_dockerd: true For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher even after the removal of in-tree Dockershim in Kubernetes 1.24. -### FAQ +## FAQ
diff --git a/versioned_docs/version-2.7/faq/dockershim.md b/versioned_docs/version-2.7/faq/dockershim.md index 4e710c9746b2..0edaa23b6aa6 100644 --- a/versioned_docs/version-2.7/faq/dockershim.md +++ b/versioned_docs/version-2.7/faq/dockershim.md @@ -18,7 +18,7 @@ enable_cri_dockerd: true For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher even after the removal of in-tree Dockershim in Kubernetes 1.24. -### FAQ +## FAQ
diff --git a/versioned_docs/version-2.8/faq/dockershim.md b/versioned_docs/version-2.8/faq/dockershim.md index 4e710c9746b2..0edaa23b6aa6 100644 --- a/versioned_docs/version-2.8/faq/dockershim.md +++ b/versioned_docs/version-2.8/faq/dockershim.md @@ -18,7 +18,7 @@ enable_cri_dockerd: true For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher even after the removal of in-tree Dockershim in Kubernetes 1.24. -### FAQ +## FAQ
diff --git a/versioned_docs/version-2.9/faq/dockershim.md b/versioned_docs/version-2.9/faq/dockershim.md index 4e710c9746b2..0edaa23b6aa6 100644 --- a/versioned_docs/version-2.9/faq/dockershim.md +++ b/versioned_docs/version-2.9/faq/dockershim.md @@ -18,7 +18,7 @@ enable_cri_dockerd: true For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher even after the removal of in-tree Dockershim in Kubernetes 1.24. -### FAQ +## FAQ
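The hunks above (and most of those in the patches that follow) make the same change: a top-level FAQ heading is promoted from `###` to `##` so heading levels stay in sequence. Remaining candidates for the same fix can be surfaced with a quick search — a minimal sketch, assuming GNU grep and the repository layout shown in the diffstats above; matches still need manual review, since level-3 headings nested under a `##` section are legitimate:

```bash
# Surface level-3 headings in the FAQ pages across all doc versions for review;
# the hunks in this series rewrite top-level ones to level 2 ("### Foo" -> "## Foo").
grep -rn --include='*.md' '^### ' docs/faq versioned_docs/*/faq
```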
From beeefd96b60a6b27106ea096a2b7f2aadffa0d89 Mon Sep 17 00:00:00 2001 From: martyav Date: Fri, 13 Sep 2024 15:40:37 -0400 Subject: [PATCH 02/31] fixed deprecated-features.md --- docs/faq/deprecated-features.md | 6 +++--- versioned_docs/version-2.5/faq/deprecated-features.md | 6 +++--- versioned_docs/version-2.6/faq/deprecated-features.md | 6 +++--- versioned_docs/version-2.7/faq/deprecated-features.md | 6 +++--- versioned_docs/version-2.8/faq/deprecated-features.md | 6 +++--- versioned_docs/version-2.9/faq/deprecated-features.md | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/faq/deprecated-features.md b/docs/faq/deprecated-features.md index 3671baec4eeb..a7a146a82d1e 100644 --- a/docs/faq/deprecated-features.md +++ b/docs/faq/deprecated-features.md @@ -6,11 +6,11 @@ title: Deprecated Features in Rancher -### What is Rancher's deprecation policy? +## What is Rancher's deprecation policy? We have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). -### Where can I find out which features have been deprecated in Rancher? +## Where can I find out which features have been deprecated in Rancher? Rancher will publish deprecated features as part of the [release notes](https://github.com/rancher/rancher/releases) for Rancher found on GitHub. Please consult the following patch releases for deprecated features: @@ -19,6 +19,6 @@ Rancher will publish deprecated features as part of the [release notes](https:// | [2.9.1](https://github.com/rancher/rancher/releases/tag/v2.9.1) | Aug 26, 2024 | | [2.9.0](https://github.com/rancher/rancher/releases/tag/v2.9.0) | Jul 31, 2024 | -### What can I expect when a feature is marked for deprecation? +## What can I expect when a feature is marked for deprecation? In the release where functionality is marked as "Deprecated", it will still be available and supported allowing upgrades to follow the usual procedure. Once upgraded, users/admins should start planning to move away from the deprecated functionality before upgrading to the release it marked as removed. The recommendation for new deployments is to not use the deprecated feature. \ No newline at end of file diff --git a/versioned_docs/version-2.5/faq/deprecated-features.md b/versioned_docs/version-2.5/faq/deprecated-features.md index 80144c223e84..eb5e694fd7c7 100644 --- a/versioned_docs/version-2.5/faq/deprecated-features.md +++ b/versioned_docs/version-2.5/faq/deprecated-features.md @@ -6,14 +6,14 @@ title: Deprecated Features in Rancher v2.5 -### What is Rancher's deprecation policy? +## What is Rancher's deprecation policy? Starting in Rancher 2.5 we have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). -### Where can I find out which features have been deprecated in Rancher 2.5? +## Where can I find out which features have been deprecated in Rancher 2.5? Rancher will publish deprecated features as part of the [release notes](https://github.com/rancher/rancher/releases/tag/v2.5.0) for Rancher found on GitHub. -### What can I expect when a feature is marked for deprecation? +## What can I expect when a feature is marked for deprecation? In the release where functionality is marked as Deprecated it will still be available and supported allowing upgrades to follow the usual procedure. 
Once upgraded, users/admins should start planning to move away from the deprecated functionality before upgrading to the release it marked as removed. The recommendation for new deployments is to not use the deprecated feature. \ No newline at end of file diff --git a/versioned_docs/version-2.6/faq/deprecated-features.md b/versioned_docs/version-2.6/faq/deprecated-features.md index a5ca0d9ebc57..f7700ad05a8f 100644 --- a/versioned_docs/version-2.6/faq/deprecated-features.md +++ b/versioned_docs/version-2.6/faq/deprecated-features.md @@ -6,11 +6,11 @@ title: Deprecated Features in Rancher -### What is Rancher's deprecation policy? +## What is Rancher's deprecation policy? We have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). -### Where can I find out which features have been deprecated in Rancher? +## Where can I find out which features have been deprecated in Rancher? Rancher will publish deprecated features as part of the [release notes](https://github.com/rancher/rancher/releases) for Rancher found on GitHub. Please consult the following patch releases for deprecated features: @@ -32,6 +32,6 @@ Rancher will publish deprecated features as part of the [release notes](https:// | [2.6.1](https://github.com/rancher/rancher/releases/tag/v2.6.1) | Oct 11, 2021 | | [2.6.0](https://github.com/rancher/rancher/releases/tag/v2.6.0) | Aug 31, 2021 | -### What can I expect when a feature is marked for deprecation? +## What can I expect when a feature is marked for deprecation? In the release where functionality is marked as "Deprecated", it will still be available and supported allowing upgrades to follow the usual procedure. Once upgraded, users/admins should start planning to move away from the deprecated functionality before upgrading to the release it marked as removed. The recommendation for new deployments is to not use the deprecated feature. \ No newline at end of file diff --git a/versioned_docs/version-2.7/faq/deprecated-features.md b/versioned_docs/version-2.7/faq/deprecated-features.md index 0188e762f71c..8ca979b1d87b 100644 --- a/versioned_docs/version-2.7/faq/deprecated-features.md +++ b/versioned_docs/version-2.7/faq/deprecated-features.md @@ -6,11 +6,11 @@ title: Deprecated Features in Rancher -### What is Rancher's deprecation policy? +## What is Rancher's deprecation policy? We have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). -### Where can I find out which features have been deprecated in Rancher? +## Where can I find out which features have been deprecated in Rancher? Rancher will publish deprecated features as part of the [release notes](https://github.com/rancher/rancher/releases) for Rancher found on GitHub. Please consult the following patch releases for deprecated features: @@ -33,6 +33,6 @@ Rancher will publish deprecated features as part of the [release notes](https:// | [2.7.1](https://github.com/rancher/rancher/releases/tag/v2.7.1) | Jan 24, 2023 | | [2.7.0](https://github.com/rancher/rancher/releases/tag/v2.7.0) | Nov 16, 2022 | -### What can I expect when a feature is marked for deprecation? +## What can I expect when a feature is marked for deprecation? In the release where functionality is marked as "Deprecated", it will still be available and supported allowing upgrades to follow the usual procedure. 
Once upgraded, users/admins should start planning to move away from the deprecated functionality before upgrading to the release it marked as removed. The recommendation for new deployments is to not use the deprecated feature. \ No newline at end of file diff --git a/versioned_docs/version-2.8/faq/deprecated-features.md b/versioned_docs/version-2.8/faq/deprecated-features.md index 37ba9a9cea1b..5bc9520ef428 100644 --- a/versioned_docs/version-2.8/faq/deprecated-features.md +++ b/versioned_docs/version-2.8/faq/deprecated-features.md @@ -6,11 +6,11 @@ title: Deprecated Features in Rancher -### What is Rancher's deprecation policy? +## What is Rancher's deprecation policy? We have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). -### Where can I find out which features have been deprecated in Rancher? +## Where can I find out which features have been deprecated in Rancher? Rancher will publish deprecated features as part of the [release notes](https://github.com/rancher/rancher/releases) for Rancher found on GitHub. Please consult the following patch releases for deprecated features: @@ -25,6 +25,6 @@ Rancher will publish deprecated features as part of the [release notes](https:// | [2.8.1](https://github.com/rancher/rancher/releases/tag/v2.8.1) | Jan 22, 2024 | | [2.8.0](https://github.com/rancher/rancher/releases/tag/v2.8.0) | Dec 6, 2023 | -### What can I expect when a feature is marked for deprecation? +## What can I expect when a feature is marked for deprecation? In the release where functionality is marked as "Deprecated", it will still be available and supported allowing upgrades to follow the usual procedure. Once upgraded, users/admins should start planning to move away from the deprecated functionality before upgrading to the release it marked as removed. The recommendation for new deployments is to not use the deprecated feature. \ No newline at end of file diff --git a/versioned_docs/version-2.9/faq/deprecated-features.md b/versioned_docs/version-2.9/faq/deprecated-features.md index 3671baec4eeb..a7a146a82d1e 100644 --- a/versioned_docs/version-2.9/faq/deprecated-features.md +++ b/versioned_docs/version-2.9/faq/deprecated-features.md @@ -6,11 +6,11 @@ title: Deprecated Features in Rancher -### What is Rancher's deprecation policy? +## What is Rancher's deprecation policy? We have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). -### Where can I find out which features have been deprecated in Rancher? +## Where can I find out which features have been deprecated in Rancher? Rancher will publish deprecated features as part of the [release notes](https://github.com/rancher/rancher/releases) for Rancher found on GitHub. Please consult the following patch releases for deprecated features: @@ -19,6 +19,6 @@ Rancher will publish deprecated features as part of the [release notes](https:// | [2.9.1](https://github.com/rancher/rancher/releases/tag/v2.9.1) | Aug 26, 2024 | | [2.9.0](https://github.com/rancher/rancher/releases/tag/v2.9.0) | Jul 31, 2024 | -### What can I expect when a feature is marked for deprecation? +## What can I expect when a feature is marked for deprecation? In the release where functionality is marked as "Deprecated", it will still be available and supported allowing upgrades to follow the usual procedure. 
Once upgraded, users/admins should start planning to move away from the deprecated functionality before upgrading to the release it marked as removed. The recommendation for new deployments is to not use the deprecated feature. \ No newline at end of file From 163be282254463d4ee31eb4647d5682e67d554f3 Mon Sep 17 00:00:00 2001 From: martyav Date: Fri, 13 Sep 2024 15:56:00 -0400 Subject: [PATCH 03/31] fixed install-and-configure-kubectl.md --- docs/faq/install-and-configure-kubectl.md | 4 ++-- .../version-2.0-2.4/faq/install-and-configure-kubectl.md | 4 ++-- .../version-2.5/faq/install-and-configure-kubectl.md | 4 ++-- .../version-2.6/faq/install-and-configure-kubectl.md | 4 ++-- .../version-2.7/faq/install-and-configure-kubectl.md | 4 ++-- .../version-2.8/faq/install-and-configure-kubectl.md | 4 ++-- .../version-2.9/faq/install-and-configure-kubectl.md | 4 ++-- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/faq/install-and-configure-kubectl.md b/docs/faq/install-and-configure-kubectl.md index 9bcb56bc3d28..868eb42caacf 100644 --- a/docs/faq/install-and-configure-kubectl.md +++ b/docs/faq/install-and-configure-kubectl.md @@ -8,11 +8,11 @@ title: Installing and Configuring kubectl `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. -### Installation +## Installation See [kubectl Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation on your operating system. -### Configuration +## Configuration When you create a Kubernetes cluster with RKE, RKE creates a `kube_config_cluster.yml` in the local directory that contains credentials to connect to your new cluster with tools like `kubectl` or `helm`. diff --git a/versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md b/versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md index 9623812941bf..54be9ff74eb8 100644 --- a/versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md +++ b/versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md @@ -8,11 +8,11 @@ title: Installing and Configuring kubectl `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. -### Installation +## Installation See [kubectl Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation on your operating system. -### Configuration +## Configuration When you create a Kubernetes cluster with RKE, RKE creates a `kube_config_rancher-cluster.yml` in the local directory that contains credentials to connect to your new cluster with tools like `kubectl` or `helm`. diff --git a/versioned_docs/version-2.5/faq/install-and-configure-kubectl.md b/versioned_docs/version-2.5/faq/install-and-configure-kubectl.md index 2c4820540a99..ca3edbb78bd6 100644 --- a/versioned_docs/version-2.5/faq/install-and-configure-kubectl.md +++ b/versioned_docs/version-2.5/faq/install-and-configure-kubectl.md @@ -8,11 +8,11 @@ title: Installing and Configuring kubectl `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. -### Installation +## Installation See [kubectl Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation on your operating system. 
-### Configuration +## Configuration When you create a Kubernetes cluster with RKE, RKE creates a `kube_config_cluster.yml` in the local directory that contains credentials to connect to your new cluster with tools like `kubectl` or `helm`. diff --git a/versioned_docs/version-2.6/faq/install-and-configure-kubectl.md b/versioned_docs/version-2.6/faq/install-and-configure-kubectl.md index 9bcb56bc3d28..868eb42caacf 100644 --- a/versioned_docs/version-2.6/faq/install-and-configure-kubectl.md +++ b/versioned_docs/version-2.6/faq/install-and-configure-kubectl.md @@ -8,11 +8,11 @@ title: Installing and Configuring kubectl `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. -### Installation +## Installation See [kubectl Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation on your operating system. -### Configuration +## Configuration When you create a Kubernetes cluster with RKE, RKE creates a `kube_config_cluster.yml` in the local directory that contains credentials to connect to your new cluster with tools like `kubectl` or `helm`. diff --git a/versioned_docs/version-2.7/faq/install-and-configure-kubectl.md b/versioned_docs/version-2.7/faq/install-and-configure-kubectl.md index 9bcb56bc3d28..868eb42caacf 100644 --- a/versioned_docs/version-2.7/faq/install-and-configure-kubectl.md +++ b/versioned_docs/version-2.7/faq/install-and-configure-kubectl.md @@ -8,11 +8,11 @@ title: Installing and Configuring kubectl `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. -### Installation +## Installation See [kubectl Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation on your operating system. -### Configuration +## Configuration When you create a Kubernetes cluster with RKE, RKE creates a `kube_config_cluster.yml` in the local directory that contains credentials to connect to your new cluster with tools like `kubectl` or `helm`. diff --git a/versioned_docs/version-2.8/faq/install-and-configure-kubectl.md b/versioned_docs/version-2.8/faq/install-and-configure-kubectl.md index 9bcb56bc3d28..868eb42caacf 100644 --- a/versioned_docs/version-2.8/faq/install-and-configure-kubectl.md +++ b/versioned_docs/version-2.8/faq/install-and-configure-kubectl.md @@ -8,11 +8,11 @@ title: Installing and Configuring kubectl `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. -### Installation +## Installation See [kubectl Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation on your operating system. -### Configuration +## Configuration When you create a Kubernetes cluster with RKE, RKE creates a `kube_config_cluster.yml` in the local directory that contains credentials to connect to your new cluster with tools like `kubectl` or `helm`. diff --git a/versioned_docs/version-2.9/faq/install-and-configure-kubectl.md b/versioned_docs/version-2.9/faq/install-and-configure-kubectl.md index 9bcb56bc3d28..868eb42caacf 100644 --- a/versioned_docs/version-2.9/faq/install-and-configure-kubectl.md +++ b/versioned_docs/version-2.9/faq/install-and-configure-kubectl.md @@ -8,11 +8,11 @@ title: Installing and Configuring kubectl `kubectl` is a CLI utility for running commands against Kubernetes clusters. 
It's required for many maintenance and administrative tasks in Rancher 2.x. -### Installation +## Installation See [kubectl Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation on your operating system. -### Configuration +## Configuration When you create a Kubernetes cluster with RKE, RKE creates a `kube_config_cluster.yml` in the local directory that contains credentials to connect to your new cluster with tools like `kubectl` or `helm`. From ef653c74a7a8ac2e14ed4948149ceb293b23fe46 Mon Sep 17 00:00:00 2001 From: martyav Date: Fri, 13 Sep 2024 16:01:57 -0400 Subject: [PATCH 04/31] fixed rancher-is-no-longer-needed.md --- docs/faq/rancher-is-no-longer-needed.md | 10 +++++----- .../version-2.0-2.4/faq/rancher-is-no-longer-needed.md | 10 +++++----- .../version-2.5/faq/rancher-is-no-longer-needed.md | 10 +++++----- .../version-2.6/faq/rancher-is-no-longer-needed.md | 10 +++++----- .../version-2.8/faq/rancher-is-no-longer-needed.md | 10 +++++----- .../version-2.9/faq/rancher-is-no-longer-needed.md | 10 +++++----- 6 files changed, 30 insertions(+), 30 deletions(-) diff --git a/docs/faq/rancher-is-no-longer-needed.md b/docs/faq/rancher-is-no-longer-needed.md index 3f825b0f0483..1752bd29b378 100644 --- a/docs/faq/rancher-is-no-longer-needed.md +++ b/docs/faq/rancher-is-no-longer-needed.md @@ -9,11 +9,11 @@ title: Rancher is No Longer Needed This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. -### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? +## If the Rancher server is deleted, what happens to the workloads in my downstream clusters? If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. -### If the Rancher server is deleted, how do I access my downstream clusters? +## If the Rancher server is deleted, how do I access my downstream clusters? The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: @@ -21,7 +21,7 @@ The capability to access a downstream cluster without Rancher depends on the typ - **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. - **RKE clusters:** To access an [RKE cluster,](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) the cluster must have the [authorized cluster endpoint](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) 
With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. -### What if I don't want Rancher anymore? +## What if I don't want Rancher anymore? :::note @@ -44,7 +44,7 @@ If you installed Rancher with Docker, you can uninstall Rancher by removing the Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) -### What if I don't want my registered cluster managed by Rancher? +## What if I don't want my registered cluster managed by Rancher? If a registered cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was registered in Rancher. @@ -56,7 +56,7 @@ To detach the cluster, **Result:** The registered cluster is detached from Rancher and functions normally outside of Rancher. -### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? +## What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. diff --git a/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md b/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md index edcad2979ff0..4c48a3528427 100644 --- a/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md +++ b/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md @@ -9,11 +9,11 @@ title: Rancher is No Longer Needed This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. -### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? +## If the Rancher server is deleted, what happens to the workloads in my downstream clusters? If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. -### If the Rancher server is deleted, how do I access my downstream clusters? +## If the Rancher server is deleted, how do I access my downstream clusters? The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. 
To summarize: @@ -21,7 +21,7 @@ The capability to access a downstream cluster without Rancher depends on the typ - **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. - **RKE clusters:** To access an [RKE cluster,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) the cluster must have the [authorized cluster endpoint](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. -### What if I don't want Rancher anymore? +## What if I don't want Rancher anymore? If you [installed Rancher on a Kubernetes cluster,](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md) remove Rancher by using the [System Tools](../reference-guides/system-tools.md) with the `remove` subcommand. @@ -29,7 +29,7 @@ If you installed Rancher with Docker, you can uninstall Rancher by removing the Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) -### What if I don't want my imported cluster managed by Rancher? +## What if I don't want my imported cluster managed by Rancher? If an imported cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was imported into Rancher. @@ -41,7 +41,7 @@ To detach the cluster, **Result:** The imported cluster is detached from Rancher and functions normally outside of Rancher. -### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? +## What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. 
diff --git a/versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md b/versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md index b051a46d5b07..517a0e2f1522 100644 --- a/versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md +++ b/versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md @@ -9,11 +9,11 @@ title: Rancher is No Longer Needed This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. -### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? +## If the Rancher server is deleted, what happens to the workloads in my downstream clusters? If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. -### If the Rancher server is deleted, how do I access my downstream clusters? +## If the Rancher server is deleted, how do I access my downstream clusters? The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: @@ -21,7 +21,7 @@ The capability to access a downstream cluster without Rancher depends on the typ - **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. - **RKE clusters:** Please note that you will no longer be able to manage the individual Kubernetes components or perform any upgrades on them after the deletion of the Rancher server. However, you can still access the cluster to manage your workloads. To access an [RKE cluster,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) the cluster must have the [authorized cluster endpoint](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. -### What if I don't want Rancher anymore? +## What if I don't want Rancher anymore? If you [installed Rancher on a Kubernetes cluster,](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md) remove Rancher by using the [System Tools](../reference-guides/system-tools.md) with the `remove` subcommand. @@ -38,7 +38,7 @@ If you installed Rancher with Docker, you can uninstall Rancher by removing the Imported clusters will not be affected by Rancher being removed. 
For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) -### What if I don't want my registered cluster managed by Rancher? +## What if I don't want my registered cluster managed by Rancher? If a registered cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was registered in Rancher. @@ -50,7 +50,7 @@ To detach the cluster, **Result:** The registered cluster is detached from Rancher and functions normally outside of Rancher. -### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? +## What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. diff --git a/versioned_docs/version-2.6/faq/rancher-is-no-longer-needed.md b/versioned_docs/version-2.6/faq/rancher-is-no-longer-needed.md index f1fd2701d5d5..91b5c9a8777b 100644 --- a/versioned_docs/version-2.6/faq/rancher-is-no-longer-needed.md +++ b/versioned_docs/version-2.6/faq/rancher-is-no-longer-needed.md @@ -9,11 +9,11 @@ title: Rancher is No Longer Needed This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. -### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? +## If the Rancher server is deleted, what happens to the workloads in my downstream clusters? If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. -### If the Rancher server is deleted, how do I access my downstream clusters? +## If the Rancher server is deleted, how do I access my downstream clusters? The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: @@ -21,7 +21,7 @@ The capability to access a downstream cluster without Rancher depends on the typ - **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. - **RKE clusters:** To access an [RKE cluster,](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) the cluster must have the [authorized cluster endpoint](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) 
With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. -### What if I don't want Rancher anymore? +## What if I don't want Rancher anymore? :::note @@ -44,7 +44,7 @@ If you installed Rancher with Docker, you can uninstall Rancher by removing the Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) -### What if I don't want my registered cluster managed by Rancher? +## What if I don't want my registered cluster managed by Rancher? If a registered cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was registered in Rancher. @@ -56,7 +56,7 @@ To detach the cluster, **Result:** The registered cluster is detached from Rancher and functions normally outside of Rancher. -### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? +## What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. diff --git a/versioned_docs/version-2.8/faq/rancher-is-no-longer-needed.md b/versioned_docs/version-2.8/faq/rancher-is-no-longer-needed.md index 3f825b0f0483..1752bd29b378 100644 --- a/versioned_docs/version-2.8/faq/rancher-is-no-longer-needed.md +++ b/versioned_docs/version-2.8/faq/rancher-is-no-longer-needed.md @@ -9,11 +9,11 @@ title: Rancher is No Longer Needed This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. -### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? +## If the Rancher server is deleted, what happens to the workloads in my downstream clusters? If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. -### If the Rancher server is deleted, how do I access my downstream clusters? +## If the Rancher server is deleted, how do I access my downstream clusters? The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. 
To summarize: @@ -21,7 +21,7 @@ The capability to access a downstream cluster without Rancher depends on the typ - **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. - **RKE clusters:** To access an [RKE cluster,](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) the cluster must have the [authorized cluster endpoint](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. -### What if I don't want Rancher anymore? +## What if I don't want Rancher anymore? :::note @@ -44,7 +44,7 @@ If you installed Rancher with Docker, you can uninstall Rancher by removing the Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) -### What if I don't want my registered cluster managed by Rancher? +## What if I don't want my registered cluster managed by Rancher? If a registered cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was registered in Rancher. @@ -56,7 +56,7 @@ To detach the cluster, **Result:** The registered cluster is detached from Rancher and functions normally outside of Rancher. -### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? +## What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. diff --git a/versioned_docs/version-2.9/faq/rancher-is-no-longer-needed.md b/versioned_docs/version-2.9/faq/rancher-is-no-longer-needed.md index 3f825b0f0483..1752bd29b378 100644 --- a/versioned_docs/version-2.9/faq/rancher-is-no-longer-needed.md +++ b/versioned_docs/version-2.9/faq/rancher-is-no-longer-needed.md @@ -9,11 +9,11 @@ title: Rancher is No Longer Needed This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. -### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? 
+## If the Rancher server is deleted, what happens to the workloads in my downstream clusters? If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. -### If the Rancher server is deleted, how do I access my downstream clusters? +## If the Rancher server is deleted, how do I access my downstream clusters? The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: @@ -21,7 +21,7 @@ The capability to access a downstream cluster without Rancher depends on the typ - **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. - **RKE clusters:** To access an [RKE cluster,](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) the cluster must have the [authorized cluster endpoint](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. -### What if I don't want Rancher anymore? +## What if I don't want Rancher anymore? :::note @@ -44,7 +44,7 @@ If you installed Rancher with Docker, you can uninstall Rancher by removing the Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) -### What if I don't want my registered cluster managed by Rancher? +## What if I don't want my registered cluster managed by Rancher? If a registered cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was registered in Rancher. @@ -56,7 +56,7 @@ To detach the cluster, **Result:** The registered cluster is detached from Rancher and functions normally outside of Rancher. -### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? +## What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. 
From 9c8ad05b968ce5872a7b08a4d42601ab53d57de1 Mon Sep 17 00:00:00 2001 From: martyav Date: Fri, 13 Sep 2024 16:06:02 -0400 Subject: [PATCH 05/31] fixed security.md --- docs/faq/security.md | 7 +++---- versioned_docs/version-2.0-2.4/faq/security.md | 6 ++---- versioned_docs/version-2.5/faq/security.md | 6 ++---- versioned_docs/version-2.7/faq/security.md | 6 +++--- versioned_docs/version-2.8/faq/security.md | 6 +++--- versioned_docs/version-2.9/faq/security.md | 7 +++---- 6 files changed, 16 insertions(+), 22 deletions(-) diff --git a/docs/faq/security.md b/docs/faq/security.md index 08fd84227308..165fea2ba919 100644 --- a/docs/faq/security.md +++ b/docs/faq/security.md @@ -1,21 +1,20 @@ --- title: Security FAQ - --- -### Is there a Hardening Guide? +## Is there a Hardening Guide? The Hardening Guide is located in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. -### Have hardened Rancher Kubernetes clusters been evaluated by the CIS Kubernetes Benchmark? Where can I find the results? +## Have hardened Rancher Kubernetes clusters been evaluated by the CIS Kubernetes Benchmark? Where can I find the results? We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. -### How does Rancher verify communication with downstream clusters, and what are some associated security concerns? +## How does Rancher verify communication with downstream clusters, and what are some associated security concerns? Communication between the Rancher server and downstream clusters is performed through agents. Rancher uses either a registered certificate authority (CA) bundle or the local trust store to verify communication between Rancher agents and the Rancher server. Using a CA bundle for verification is more strict, as only the certificates based on that bundle are trusted. If TLS verification for a explicit CA bundle fails, Rancher may fall back to using the local trust store for verifying future communication. Any CA within the local trust store can then be used to generate a valid certificate. diff --git a/versioned_docs/version-2.0-2.4/faq/security.md b/versioned_docs/version-2.0-2.4/faq/security.md index 9d23ec6455ee..bbd57da8770e 100644 --- a/versioned_docs/version-2.0-2.4/faq/security.md +++ b/versioned_docs/version-2.0-2.4/faq/security.md @@ -6,12 +6,10 @@ title: Security FAQ -**Is there a Hardening Guide?** +## Is there a Hardening Guide? The Hardening Guide is now located in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. -
- -**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** +## What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked? We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. diff --git a/versioned_docs/version-2.5/faq/security.md b/versioned_docs/version-2.5/faq/security.md index 9d23ec6455ee..bbd57da8770e 100644 --- a/versioned_docs/version-2.5/faq/security.md +++ b/versioned_docs/version-2.5/faq/security.md @@ -6,12 +6,10 @@ title: Security FAQ -**Is there a Hardening Guide?** +## Is there a Hardening Guide? The Hardening Guide is now located in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. -
- -**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** +## What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked? We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. diff --git a/versioned_docs/version-2.7/faq/security.md b/versioned_docs/version-2.7/faq/security.md index 08fd84227308..5efa87767862 100644 --- a/versioned_docs/version-2.7/faq/security.md +++ b/versioned_docs/version-2.7/faq/security.md @@ -7,15 +7,15 @@ title: Security FAQ -### Is there a Hardening Guide? +## Is there a Hardening Guide? The Hardening Guide is located in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. -### Have hardened Rancher Kubernetes clusters been evaluated by the CIS Kubernetes Benchmark? Where can I find the results? +## Have hardened Rancher Kubernetes clusters been evaluated by the CIS Kubernetes Benchmark? Where can I find the results? We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. -### How does Rancher verify communication with downstream clusters, and what are some associated security concerns? +## How does Rancher verify communication with downstream clusters, and what are some associated security concerns? Communication between the Rancher server and downstream clusters is performed through agents. Rancher uses either a registered certificate authority (CA) bundle or the local trust store to verify communication between Rancher agents and the Rancher server. Using a CA bundle for verification is more strict, as only the certificates based on that bundle are trusted. If TLS verification for a explicit CA bundle fails, Rancher may fall back to using the local trust store for verifying future communication. Any CA within the local trust store can then be used to generate a valid certificate. diff --git a/versioned_docs/version-2.8/faq/security.md b/versioned_docs/version-2.8/faq/security.md index 08fd84227308..5efa87767862 100644 --- a/versioned_docs/version-2.8/faq/security.md +++ b/versioned_docs/version-2.8/faq/security.md @@ -7,15 +7,15 @@ title: Security FAQ -### Is there a Hardening Guide? +## Is there a Hardening Guide? The Hardening Guide is located in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. -### Have hardened Rancher Kubernetes clusters been evaluated by the CIS Kubernetes Benchmark? Where can I find the results? +## Have hardened Rancher Kubernetes clusters been evaluated by the CIS Kubernetes Benchmark? Where can I find the results? We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. -### How does Rancher verify communication with downstream clusters, and what are some associated security concerns? +## How does Rancher verify communication with downstream clusters, and what are some associated security concerns? Communication between the Rancher server and downstream clusters is performed through agents. 
Rancher uses either a registered certificate authority (CA) bundle or the local trust store to verify communication between Rancher agents and the Rancher server. Using a CA bundle for verification is more strict, as only the certificates based on that bundle are trusted. If TLS verification for a explicit CA bundle fails, Rancher may fall back to using the local trust store for verifying future communication. Any CA within the local trust store can then be used to generate a valid certificate. diff --git a/versioned_docs/version-2.9/faq/security.md b/versioned_docs/version-2.9/faq/security.md index 08fd84227308..165fea2ba919 100644 --- a/versioned_docs/version-2.9/faq/security.md +++ b/versioned_docs/version-2.9/faq/security.md @@ -1,21 +1,20 @@ --- title: Security FAQ - --- -### Is there a Hardening Guide? +## Is there a Hardening Guide? The Hardening Guide is located in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. -### Have hardened Rancher Kubernetes clusters been evaluated by the CIS Kubernetes Benchmark? Where can I find the results? +## Have hardened Rancher Kubernetes clusters been evaluated by the CIS Kubernetes Benchmark? Where can I find the results? We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security](../reference-guides/rancher-security/rancher-security.md) section. -### How does Rancher verify communication with downstream clusters, and what are some associated security concerns? +## How does Rancher verify communication with downstream clusters, and what are some associated security concerns? Communication between the Rancher server and downstream clusters is performed through agents. Rancher uses either a registered certificate authority (CA) bundle or the local trust store to verify communication between Rancher agents and the Rancher server. Using a CA bundle for verification is more strict, as only the certificates based on that bundle are trusted. If TLS verification for a explicit CA bundle fails, Rancher may fall back to using the local trust store for verifying future communication. Any CA within the local trust store can then be used to generate a valid certificate. From 4d41b09b62803d2b50f89e1291d13e93f0df2f60 Mon Sep 17 00:00:00 2001 From: martyav Date: Fri, 13 Sep 2024 16:24:22 -0400 Subject: [PATCH 06/31] fixed technical-items.md + spacing, duplicate section, admonitions --- docs/faq/technical-items.md | 40 +++++++----- .../version-2.0-2.4/faq/technical-items.md | 65 +++++++++---------- .../version-2.5/faq/technical-items.md | 47 ++++++++------ .../version-2.6/faq/technical-items.md | 40 +++++++----- .../version-2.7/faq/technical-items.md | 39 ++++++----- .../version-2.8/faq/technical-items.md | 40 +++++++----- .../version-2.9/faq/technical-items.md | 39 ++++++----- 7 files changed, 165 insertions(+), 145 deletions(-) diff --git a/docs/faq/technical-items.md b/docs/faq/technical-items.md index 42bfe9667268..20e714a49300 100644 --- a/docs/faq/technical-items.md +++ b/docs/faq/technical-items.md @@ -6,9 +6,10 @@ title: Technical FAQ -### How can I reset the administrator password? +## How can I reset the administrator password? 
+ +Docker install: -Docker Install: ``` $ docker exec -ti reset-password New password for default administrator (user-xxxxx): @@ -16,6 +17,7 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher --no-headers | head -1 | awk '{ print $1 }') -c rancher -- reset-password @@ -23,10 +25,10 @@ New password for default administrator (user-xxxxx): ``` +## I deleted/deactivated the last admin, how can I fix it? +Docker install: -### I deleted/deactivated the last admin, how can I fix it? -Docker Install: ``` $ docker exec -ti ensure-default-admin New default administrator (user-xxxxx) @@ -35,38 +37,40 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin New password for default administrator (user-xxxxx): ``` -### How can I enable debug logging? + +## How can I enable debug logging? See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md) -### My ClusterIP does not respond to ping +## My ClusterIP does not respond to ping ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. -### Where can I manage Node Templates? +## Where can I manage Node Templates? Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. -### Why is my Layer-4 Load Balancer in `Pending` state? +## Why is my Layer-4 Load Balancer in `Pending` state? The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) -### Where is the state of Rancher stored? +## Where is the state of Rancher stored? - Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. - Kubernetes install: in the etcd of the RKE cluster created to run Rancher. -### How are the supported Docker versions determined? +## How are the supported Docker versions determined? We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. -### How can I access nodes created by Rancher? +## How can I access nodes created by Rancher? SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. @@ -78,14 +82,14 @@ Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. 
$ ssh -i id_rsa user@ip_of_node ``` -### How can I automate task X in Rancher? +## How can I automate task X in Rancher? The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: * Visit `https://your_rancher_ip/v3` and browse the API options. * Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) -### The IP address of a node changed, how can I recover? +## The IP address of a node changed, how can I recover? A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. @@ -93,11 +97,11 @@ When the IP address of the node changed, Rancher lost connection to the node, so When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. -### How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? +## How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? You can add more arguments/binds/environment variables via the [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-cluster-config-file-reference) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/). -### How do I check if my certificate chain is valid? +## How do I check if my certificate chain is valid? Use the `openssl verify` command to validate your certificate chain: @@ -138,7 +142,7 @@ subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA ``` -### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? +## How do I check `Common Name` and `Subject Alternative Names` in my server certificate? Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. @@ -156,7 +160,7 @@ openssl x509 -noout -in cert.pem -text | grep DNS DNS:rancher.my.org ``` -### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? +## Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? This is due to a combination of the following default Kubernetes settings: @@ -175,6 +179,6 @@ In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. Se * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. 
* `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. -### Can I use keyboard shortcuts in the UI? +## Can I use keyboard shortcuts in the UI? Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. diff --git a/versioned_docs/version-2.0-2.4/faq/technical-items.md b/versioned_docs/version-2.0-2.4/faq/technical-items.md index 3cc8542d4a0c..744ca043b0ee 100644 --- a/versioned_docs/version-2.0-2.4/faq/technical-items.md +++ b/versioned_docs/version-2.0-2.4/faq/technical-items.md @@ -6,9 +6,10 @@ title: Technical FAQ -### How can I reset the administrator password? +## How can I reset the administrator password? + +Docker install: -Docker Install: ``` $ docker exec -ti reset-password New password for default administrator (user-xxxxx): @@ -16,6 +17,7 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_rancher-cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- reset-password @@ -23,11 +25,14 @@ New password for default administrator (user-xxxxx): ``` -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to start using the Helm chart. +### RKE add-on install (only supported up to Rancher v2.0.8) + +:::tip +> If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to start using the Helm chart. +::: Kubernetes install (RKE add-on): + ``` $ KUBECONFIG=./kube_config_rancher-cluster.yml $ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- reset-password @@ -35,8 +40,10 @@ New password for default administrator (user-xxxxx): ``` -### I deleted/deactivated the last admin, how can I fix it? -Docker Install: +## I deleted/deactivated the last admin, how can I fix it? + +Docker install: + ``` $ docker exec -ti ensure-default-admin New default administrator (user-xxxxx) @@ -52,44 +59,32 @@ New password for default administrator (user-xxxxx): ``` -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to start using the Helm chart. 
- -Kubernetes install (RKE add-on): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- ensure-default-admin -New password for default admin user (user-xxxxx): - -``` - -### How can I enable debug logging? +## How can I enable debug logging? See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md) -### My ClusterIP does not respond to ping +## My ClusterIP does not respond to ping ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. -### Where can I manage Node Templates? +## Where can I manage Node Templates? Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. -### Why is my Layer-4 Load Balancer in `Pending` state? +## Why is my Layer-4 Load Balancer in `Pending` state? The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/set-up-cloud-providers.md) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) -### Where is the state of Rancher stored? +## Where is the state of Rancher stored? - Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. - Kubernetes install: in the etcd of the RKE cluster created to run Rancher. -### How are the supported Docker versions determined? +## How are the supported Docker versions determined? We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. -### How can I access nodes created by Rancher? +## How can I access nodes created by Rancher? SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. @@ -101,14 +96,14 @@ Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. $ ssh -i id_rsa user@ip_of_node ``` -### How can I automate task X in Rancher? +## How can I automate task X in Rancher? The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: * Visit `https://your_rancher_ip/v3` and browse the API options. * Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) -### The IP address of a node changed, how can I recover? +## The IP address of a node changed, how can I recover? A node is required to have a static IP configured (or a reserved IP via DHCP). 
If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. @@ -116,15 +111,17 @@ When the IP address of the node changed, Rancher lost connection to the node, so When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. -### How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? +## How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? You can add more arguments/binds/environment variables via the [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#cluster-config-file) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/). -### How do I check if my certificate chain is valid? +## How do I check if my certificate chain is valid? Use the `openssl verify` command to validate your certificate chain: ->**Note:** Configure `SSL_CERT_DIR` and `SSL_CERT_FILE` to a dummy location to make sure the OS installed certificates are not used when verifying manually. +:::tip +Configure `SSL_CERT_DIR` and `SSL_CERT_FILE` to a dummy location to make sure the OS installed certificates are not used when verifying manually. +::: ``` SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem rancher.yourdomain.com.pem @@ -157,7 +154,7 @@ subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA ``` -### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? +## How do I check `Common Name` and `Subject Alternative Names` in my server certificate? Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. @@ -175,7 +172,7 @@ openssl x509 -noout -in cert.pem -text | grep DNS DNS:rancher.my.org ``` -### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? +## Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? This is due to a combination of the following default Kubernetes settings: @@ -194,6 +191,6 @@ In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. Se * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. -### Can I use keyboard shortcuts in the UI? +## Can I use keyboard shortcuts in the UI? Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. 
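Following up on the pod-rescheduling timings covered in the FAQ above, a quick way to confirm the tolerations that Kubernetes adds to a pod by default is to inspect the pod spec directly. This is a minimal sketch: `mypod` is a placeholder name, and the 300-second values shown are only the upstream defaults, which may differ in your cluster:

```
# Show the default notReady/unreachable tolerations on a running pod
$ kubectl get pod mypod -o yaml | grep -A 10 'tolerations:'
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
```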
diff --git a/versioned_docs/version-2.5/faq/technical-items.md b/versioned_docs/version-2.5/faq/technical-items.md index 7328c27c200c..bd9580e5e9c8 100644 --- a/versioned_docs/version-2.5/faq/technical-items.md +++ b/versioned_docs/version-2.5/faq/technical-items.md @@ -6,9 +6,10 @@ title: Technical FAQ -### How can I reset the administrator password? +## How can I reset the administrator password? + +Docker install: -Docker Install: ``` $ docker exec -ti reset-password New password for default administrator (user-xxxxx): @@ -16,6 +17,7 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- reset-password @@ -23,10 +25,10 @@ New password for default administrator (user-xxxxx): ``` +## I deleted/deactivated the last admin, how can I fix it? +Docker install: -### I deleted/deactivated the last admin, how can I fix it? -Docker Install: ``` $ docker exec -ti ensure-default-admin New default administrator (user-xxxxx) @@ -35,38 +37,40 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin New password for default administrator (user-xxxxx): ``` -### How can I enable debug logging? + +## How can I enable debug logging? See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md) -### My ClusterIP does not respond to ping +## My ClusterIP does not respond to ping ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. -### Where can I manage Node Templates? +## Where can I manage Node Templates? Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. -### Why is my Layer-4 Load Balancer in `Pending` state? +## Why is my Layer-4 Load Balancer in `Pending` state? The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/set-up-cloud-providers.md) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) -### Where is the state of Rancher stored? +## Where is the state of Rancher stored? - Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. - Kubernetes install: in the etcd of the RKE cluster created to run Rancher. -### How are the supported Docker versions determined? +## How are the supported Docker versions determined? We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. 
-### How can I access nodes created by Rancher? +## How can I access nodes created by Rancher? SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. @@ -78,14 +82,14 @@ Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. $ ssh -i id_rsa user@ip_of_node ``` -### How can I automate task X in Rancher? +## How can I automate task X in Rancher? The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: * Visit `https://your_rancher_ip/v3` and browse the API options. * Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) -### The IP address of a node changed, how can I recover? +## The IP address of a node changed, how can I recover? A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. @@ -93,15 +97,17 @@ When the IP address of the node changed, Rancher lost connection to the node, so When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. -### How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? +## How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? You can add more arguments/binds/environment variables via the [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#editing-clusters-with-yaml) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/). -### How do I check if my certificate chain is valid? +## How do I check if my certificate chain is valid? Use the `openssl verify` command to validate your certificate chain: ->**Note:** Configure `SSL_CERT_DIR` and `SSL_CERT_FILE` to a dummy location to make sure the OS installed certificates are not used when verifying manually. +:::tip +Configure `SSL_CERT_DIR` and `SSL_CERT_FILE` to a dummy location to make sure the OS installed certificates are not used when verifying manually. +::: ``` SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem rancher.yourdomain.com.pem @@ -134,7 +140,7 @@ subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA ``` -### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? +## How do I check `Common Name` and `Subject Alternative Names` in my server certificate? 
Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. @@ -152,7 +158,7 @@ openssl x509 -noout -in cert.pem -text | grep DNS DNS:rancher.my.org ``` -### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? +## Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? This is due to a combination of the following default Kubernetes settings: @@ -171,11 +177,10 @@ In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. Se * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. -### Can I use keyboard shortcuts in the UI? +## Can I use keyboard shortcuts in the UI? Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. - -### What does `Unknown schema for type:` errors followed by something like `catalog.cattle.io.operation` mean when trying to modify an App? +## What does `Unknown schema for type:` errors followed by something like `catalog.cattle.io.operation` mean when trying to modify an App? This error occurs when Kubernetes can not find the CRD mentioned. The vast majority of the time these are a result of missing RBAC permissions. Try with an admin user and if this works, add permissions for the resource mentioned by the error (ie. `Get`, `List`, `Patch` as needed). diff --git a/versioned_docs/version-2.6/faq/technical-items.md b/versioned_docs/version-2.6/faq/technical-items.md index 42bfe9667268..55602f031821 100644 --- a/versioned_docs/version-2.6/faq/technical-items.md +++ b/versioned_docs/version-2.6/faq/technical-items.md @@ -6,9 +6,10 @@ title: Technical FAQ -### How can I reset the administrator password? +## How can I reset the administrator password? + +Docker install: -Docker Install: ``` $ docker exec -ti reset-password New password for default administrator (user-xxxxx): @@ -16,6 +17,7 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher --no-headers | head -1 | awk '{ print $1 }') -c rancher -- reset-password @@ -23,10 +25,10 @@ New password for default administrator (user-xxxxx): ``` +## I deleted/deactivated the last admin, how can I fix it? +Docker install -### I deleted/deactivated the last admin, how can I fix it? -Docker Install: ``` $ docker exec -ti ensure-default-admin New default administrator (user-xxxxx) @@ -35,38 +37,40 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin New password for default administrator (user-xxxxx): ``` -### How can I enable debug logging? + +## How can I enable debug logging? 
See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md) -### My ClusterIP does not respond to ping +## My ClusterIP does not respond to ping ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. -### Where can I manage Node Templates? +## Where can I manage Node Templates? Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. -### Why is my Layer-4 Load Balancer in `Pending` state? +## Why is my Layer-4 Load Balancer in `Pending` state? The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) -### Where is the state of Rancher stored? +## Where is the state of Rancher stored? - Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. - Kubernetes install: in the etcd of the RKE cluster created to run Rancher. -### How are the supported Docker versions determined? +## How are the supported Docker versions determined? We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. -### How can I access nodes created by Rancher? +## How can I access nodes created by Rancher? SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. @@ -78,14 +82,14 @@ Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. $ ssh -i id_rsa user@ip_of_node ``` -### How can I automate task X in Rancher? +## How can I automate task X in Rancher? The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: * Visit `https://your_rancher_ip/v3` and browse the API options. * Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) -### The IP address of a node changed, how can I recover? +## The IP address of a node changed, how can I recover? A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. @@ -93,11 +97,11 @@ When the IP address of the node changed, Rancher lost connection to the node, so When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. 
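Before removing and re-adding the node as described above, it can help to confirm from the Kubernetes side which registered node still carries the old address. A minimal check, assuming `kubectl` access to the affected cluster:

```
# The INTERNAL-IP / EXTERNAL-IP columns show the address each node registered with
$ kubectl get nodes -o wide
```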
-### How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? +## How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? You can add more arguments/binds/environment variables via the [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-cluster-config-file-reference) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/). -### How do I check if my certificate chain is valid? +## How do I check if my certificate chain is valid? Use the `openssl verify` command to validate your certificate chain: @@ -138,7 +142,7 @@ subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA ``` -### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? +## How do I check `Common Name` and `Subject Alternative Names` in my server certificate? Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. @@ -156,7 +160,7 @@ openssl x509 -noout -in cert.pem -text | grep DNS DNS:rancher.my.org ``` -### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? +## Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? This is due to a combination of the following default Kubernetes settings: @@ -175,6 +179,6 @@ In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. Se * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. -### Can I use keyboard shortcuts in the UI? +## Can I use keyboard shortcuts in the UI? Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. diff --git a/versioned_docs/version-2.7/faq/technical-items.md b/versioned_docs/version-2.7/faq/technical-items.md index 42bfe9667268..bf7d3ea63aa7 100644 --- a/versioned_docs/version-2.7/faq/technical-items.md +++ b/versioned_docs/version-2.7/faq/technical-items.md @@ -6,9 +6,10 @@ title: Technical FAQ -### How can I reset the administrator password? +## How can I reset the administrator password? 
+ +Docker install: -Docker Install: ``` $ docker exec -ti reset-password New password for default administrator (user-xxxxx): @@ -16,6 +17,7 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher --no-headers | head -1 | awk '{ print $1 }') -c rancher -- reset-password @@ -23,10 +25,10 @@ New password for default administrator (user-xxxxx): ``` +## I deleted/deactivated the last admin, how can I fix it? +Docker install: -### I deleted/deactivated the last admin, how can I fix it? -Docker Install: ``` $ docker exec -ti ensure-default-admin New default administrator (user-xxxxx) @@ -35,38 +37,39 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin New password for default administrator (user-xxxxx): ``` -### How can I enable debug logging? +## How can I enable debug logging? See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md) -### My ClusterIP does not respond to ping +## My ClusterIP does not respond to ping ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. -### Where can I manage Node Templates? +## Where can I manage Node Templates? Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. -### Why is my Layer-4 Load Balancer in `Pending` state? +## Why is my Layer-4 Load Balancer in `Pending` state? The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) -### Where is the state of Rancher stored? +## Where is the state of Rancher stored? - Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. - Kubernetes install: in the etcd of the RKE cluster created to run Rancher. -### How are the supported Docker versions determined? +## How are the supported Docker versions determined? We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. -### How can I access nodes created by Rancher? +## How can I access nodes created by Rancher? SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. @@ -78,14 +81,14 @@ Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. 
$ ssh -i id_rsa user@ip_of_node ``` -### How can I automate task X in Rancher? +## How can I automate task X in Rancher? The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: * Visit `https://your_rancher_ip/v3` and browse the API options. * Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) -### The IP address of a node changed, how can I recover? +## The IP address of a node changed, how can I recover? A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. @@ -93,11 +96,11 @@ When the IP address of the node changed, Rancher lost connection to the node, so When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. -### How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? +## How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? You can add more arguments/binds/environment variables via the [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-cluster-config-file-reference) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/). -### How do I check if my certificate chain is valid? +## How do I check if my certificate chain is valid? Use the `openssl verify` command to validate your certificate chain: @@ -138,7 +141,7 @@ subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA ``` -### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? +## How do I check `Common Name` and `Subject Alternative Names` in my server certificate? Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. @@ -156,7 +159,7 @@ openssl x509 -noout -in cert.pem -text | grep DNS DNS:rancher.my.org ``` -### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? +## Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? This is due to a combination of the following default Kubernetes settings: @@ -175,6 +178,6 @@ In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. Se * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. 
* `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. -### Can I use keyboard shortcuts in the UI? +## Can I use keyboard shortcuts in the UI? Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. diff --git a/versioned_docs/version-2.8/faq/technical-items.md b/versioned_docs/version-2.8/faq/technical-items.md index 42bfe9667268..20e714a49300 100644 --- a/versioned_docs/version-2.8/faq/technical-items.md +++ b/versioned_docs/version-2.8/faq/technical-items.md @@ -6,9 +6,10 @@ title: Technical FAQ -### How can I reset the administrator password? +## How can I reset the administrator password? + +Docker install: -Docker Install: ``` $ docker exec -ti reset-password New password for default administrator (user-xxxxx): @@ -16,6 +17,7 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher --no-headers | head -1 | awk '{ print $1 }') -c rancher -- reset-password @@ -23,10 +25,10 @@ New password for default administrator (user-xxxxx): ``` +## I deleted/deactivated the last admin, how can I fix it? +Docker install: -### I deleted/deactivated the last admin, how can I fix it? -Docker Install: ``` $ docker exec -ti ensure-default-admin New default administrator (user-xxxxx) @@ -35,38 +37,40 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin New password for default administrator (user-xxxxx): ``` -### How can I enable debug logging? + +## How can I enable debug logging? See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md) -### My ClusterIP does not respond to ping +## My ClusterIP does not respond to ping ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. -### Where can I manage Node Templates? +## Where can I manage Node Templates? Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. -### Why is my Layer-4 Load Balancer in `Pending` state? +## Why is my Layer-4 Load Balancer in `Pending` state? The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) -### Where is the state of Rancher stored? +## Where is the state of Rancher stored? - Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. - Kubernetes install: in the etcd of the RKE cluster created to run Rancher. 
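For the Docker install case just described, the embedded state under `/var/lib/rancher` only survives container re-creation if it lives on the host. Below is a minimal sketch of starting Rancher with a bind mount for that directory; `/opt/rancher` is an arbitrary example host path, and the exact flags (for example `--privileged`, which newer Rancher releases require) depend on the version you run:

```
# Persist Rancher's embedded state on the host so it survives container re-creation
$ docker run -d --restart=unless-stopped \
    -p 80:80 -p 443:443 \
    -v /opt/rancher:/var/lib/rancher \
    --privileged \
    rancher/rancher:latest
```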
-### How are the supported Docker versions determined? +## How are the supported Docker versions determined? We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. -### How can I access nodes created by Rancher? +## How can I access nodes created by Rancher? SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. @@ -78,14 +82,14 @@ Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. $ ssh -i id_rsa user@ip_of_node ``` -### How can I automate task X in Rancher? +## How can I automate task X in Rancher? The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: * Visit `https://your_rancher_ip/v3` and browse the API options. * Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) -### The IP address of a node changed, how can I recover? +## The IP address of a node changed, how can I recover? A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. @@ -93,11 +97,11 @@ When the IP address of the node changed, Rancher lost connection to the node, so When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. -### How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? +## How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? You can add more arguments/binds/environment variables via the [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-cluster-config-file-reference) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/). -### How do I check if my certificate chain is valid? +## How do I check if my certificate chain is valid? Use the `openssl verify` command to validate your certificate chain: @@ -138,7 +142,7 @@ subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA ``` -### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? +## How do I check `Common Name` and `Subject Alternative Names` in my server certificate? 
Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. @@ -156,7 +160,7 @@ openssl x509 -noout -in cert.pem -text | grep DNS DNS:rancher.my.org ``` -### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? +## Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? This is due to a combination of the following default Kubernetes settings: @@ -175,6 +179,6 @@ In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. Se * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. -### Can I use keyboard shortcuts in the UI? +## Can I use keyboard shortcuts in the UI? Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. diff --git a/versioned_docs/version-2.9/faq/technical-items.md b/versioned_docs/version-2.9/faq/technical-items.md index 42bfe9667268..ec9bb24aefba 100644 --- a/versioned_docs/version-2.9/faq/technical-items.md +++ b/versioned_docs/version-2.9/faq/technical-items.md @@ -6,9 +6,10 @@ title: Technical FAQ -### How can I reset the administrator password? +## How can I reset the administrator password? + +Docker install: -Docker Install: ``` $ docker exec -ti reset-password New password for default administrator (user-xxxxx): @@ -23,10 +24,10 @@ New password for default administrator (user-xxxxx): ``` +### I deleted/deactivated the last admin, how can I fix it? +Docker install: -### I deleted/deactivated the last admin, how can I fix it? -Docker Install: ``` $ docker exec -ti ensure-default-admin New default administrator (user-xxxxx) @@ -35,38 +36,40 @@ New password for default administrator (user-xxxxx): ``` Kubernetes install (Helm): + ``` $ KUBECONFIG=./kube_config_cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin New password for default administrator (user-xxxxx): ``` -### How can I enable debug logging? + +## How can I enable debug logging? See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md) -### My ClusterIP does not respond to ping +## My ClusterIP does not respond to ping ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. -### Where can I manage Node Templates? +## Where can I manage Node Templates? Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. -### Why is my Layer-4 Load Balancer in `Pending` state? +## Why is my Layer-4 Load Balancer in `Pending` state? The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. 
More information can be found on [Cloud Providers](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) -### Where is the state of Rancher stored? +## Where is the state of Rancher stored? - Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. - Kubernetes install: in the etcd of the RKE cluster created to run Rancher. -### How are the supported Docker versions determined? +## How are the supported Docker versions determined? We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. -### How can I access nodes created by Rancher? +## How can I access nodes created by Rancher? SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. @@ -78,14 +81,14 @@ Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. $ ssh -i id_rsa user@ip_of_node ``` -### How can I automate task X in Rancher? +## How can I automate task X in Rancher? The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: * Visit `https://your_rancher_ip/v3` and browse the API options. * Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) -### The IP address of a node changed, how can I recover? +## The IP address of a node changed, how can I recover? A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. @@ -93,11 +96,11 @@ When the IP address of the node changed, Rancher lost connection to the node, so When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. -### How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? +## How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? You can add more arguments/binds/environment variables via the [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-cluster-config-file-reference) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/). -### How do I check if my certificate chain is valid? +## How do I check if my certificate chain is valid? 
Use the `openssl verify` command to validate your certificate chain: @@ -138,7 +141,7 @@ subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA ``` -### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? +## How do I check `Common Name` and `Subject Alternative Names` in my server certificate? Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. @@ -156,7 +159,7 @@ openssl x509 -noout -in cert.pem -text | grep DNS DNS:rancher.my.org ``` -### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? +## Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? This is due to a combination of the following default Kubernetes settings: @@ -175,6 +178,6 @@ In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. Se * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. -### Can I use keyboard shortcuts in the UI? +## Can I use keyboard shortcuts in the UI? Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. From 3e41103cba7788df3b49183f7b19af5b489759e9 Mon Sep 17 00:00:00 2001 From: martyav Date: Fri, 13 Sep 2024 16:27:02 -0400 Subject: [PATCH 07/31] fixed telemetry.md --- docs/faq/telemetry.md | 8 ++++---- versioned_docs/version-2.0-2.4/faq/telemetry.md | 8 ++++---- versioned_docs/version-2.5/faq/telemetry.md | 8 ++++---- versioned_docs/version-2.6/faq/telemetry.md | 8 ++++---- versioned_docs/version-2.7/faq/telemetry.md | 8 ++++---- versioned_docs/version-2.8/faq/telemetry.md | 8 ++++---- versioned_docs/version-2.9/faq/telemetry.md | 8 ++++---- 7 files changed, 28 insertions(+), 28 deletions(-) diff --git a/docs/faq/telemetry.md b/docs/faq/telemetry.md index edfcaebed4e7..64bcee4090f9 100644 --- a/docs/faq/telemetry.md +++ b/docs/faq/telemetry.md @@ -6,11 +6,11 @@ title: Telemetry FAQ -### What is Telemetry? +## What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. -### What information is collected? +## What information is collected? No specific identifying information like usernames, passwords, or the names or addresses of user resources will ever be collected. @@ -24,12 +24,12 @@ The primary things collected include: - The image name & version of Rancher that is running. - A unique randomly-generated identifier for this installation. -### Can I see the information that is being sent? +## Can I see the information that is being sent? If Telemetry is enabled, you can go to `https:///v1-telemetry` in your installation to see the current data. If Telemetry is not enabled, the process that collects the data is not running, so there is nothing being collected to look at. -### How do I turn it on or off? 
+## How do I turn it on or off? After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. diff --git a/versioned_docs/version-2.0-2.4/faq/telemetry.md b/versioned_docs/version-2.0-2.4/faq/telemetry.md index edfcaebed4e7..64bcee4090f9 100644 --- a/versioned_docs/version-2.0-2.4/faq/telemetry.md +++ b/versioned_docs/version-2.0-2.4/faq/telemetry.md @@ -6,11 +6,11 @@ title: Telemetry FAQ -### What is Telemetry? +## What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. -### What information is collected? +## What information is collected? No specific identifying information like usernames, passwords, or the names or addresses of user resources will ever be collected. @@ -24,12 +24,12 @@ The primary things collected include: - The image name & version of Rancher that is running. - A unique randomly-generated identifier for this installation. -### Can I see the information that is being sent? +## Can I see the information that is being sent? If Telemetry is enabled, you can go to `https:///v1-telemetry` in your installation to see the current data. If Telemetry is not enabled, the process that collects the data is not running, so there is nothing being collected to look at. -### How do I turn it on or off? +## How do I turn it on or off? After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. diff --git a/versioned_docs/version-2.5/faq/telemetry.md b/versioned_docs/version-2.5/faq/telemetry.md index edfcaebed4e7..64bcee4090f9 100644 --- a/versioned_docs/version-2.5/faq/telemetry.md +++ b/versioned_docs/version-2.5/faq/telemetry.md @@ -6,11 +6,11 @@ title: Telemetry FAQ -### What is Telemetry? +## What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. -### What information is collected? +## What information is collected? No specific identifying information like usernames, passwords, or the names or addresses of user resources will ever be collected. @@ -24,12 +24,12 @@ The primary things collected include: - The image name & version of Rancher that is running. - A unique randomly-generated identifier for this installation. -### Can I see the information that is being sent? +## Can I see the information that is being sent? If Telemetry is enabled, you can go to `https:///v1-telemetry` in your installation to see the current data. If Telemetry is not enabled, the process that collects the data is not running, so there is nothing being collected to look at. -### How do I turn it on or off? +## How do I turn it on or off? After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. 
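For example, the same data can be fetched from the command line; the hostname and API token below are placeholders for your own values:

```
# Requires Telemetry to be enabled; substitute your Rancher hostname and an admin API token.
curl -sk -u "token-abcde:examplekey" "https://rancher.yourdomain.com/v1-telemetry"
```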
diff --git a/versioned_docs/version-2.6/faq/telemetry.md b/versioned_docs/version-2.6/faq/telemetry.md index edfcaebed4e7..64bcee4090f9 100644 --- a/versioned_docs/version-2.6/faq/telemetry.md +++ b/versioned_docs/version-2.6/faq/telemetry.md @@ -6,11 +6,11 @@ title: Telemetry FAQ -### What is Telemetry? +## What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. -### What information is collected? +## What information is collected? No specific identifying information like usernames, passwords, or the names or addresses of user resources will ever be collected. @@ -24,12 +24,12 @@ The primary things collected include: - The image name & version of Rancher that is running. - A unique randomly-generated identifier for this installation. -### Can I see the information that is being sent? +## Can I see the information that is being sent? If Telemetry is enabled, you can go to `https:///v1-telemetry` in your installation to see the current data. If Telemetry is not enabled, the process that collects the data is not running, so there is nothing being collected to look at. -### How do I turn it on or off? +## How do I turn it on or off? After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. diff --git a/versioned_docs/version-2.7/faq/telemetry.md b/versioned_docs/version-2.7/faq/telemetry.md index edfcaebed4e7..64bcee4090f9 100644 --- a/versioned_docs/version-2.7/faq/telemetry.md +++ b/versioned_docs/version-2.7/faq/telemetry.md @@ -6,11 +6,11 @@ title: Telemetry FAQ -### What is Telemetry? +## What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. -### What information is collected? +## What information is collected? No specific identifying information like usernames, passwords, or the names or addresses of user resources will ever be collected. @@ -24,12 +24,12 @@ The primary things collected include: - The image name & version of Rancher that is running. - A unique randomly-generated identifier for this installation. -### Can I see the information that is being sent? +## Can I see the information that is being sent? If Telemetry is enabled, you can go to `https:///v1-telemetry` in your installation to see the current data. If Telemetry is not enabled, the process that collects the data is not running, so there is nothing being collected to look at. -### How do I turn it on or off? +## How do I turn it on or off? After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. diff --git a/versioned_docs/version-2.8/faq/telemetry.md b/versioned_docs/version-2.8/faq/telemetry.md index edfcaebed4e7..64bcee4090f9 100644 --- a/versioned_docs/version-2.8/faq/telemetry.md +++ b/versioned_docs/version-2.8/faq/telemetry.md @@ -6,11 +6,11 @@ title: Telemetry FAQ -### What is Telemetry? +## What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. 
This information is used by Rancher Labs to help make the product better and is not shared with third-parties. -### What information is collected? +## What information is collected? No specific identifying information like usernames, passwords, or the names or addresses of user resources will ever be collected. @@ -24,12 +24,12 @@ The primary things collected include: - The image name & version of Rancher that is running. - A unique randomly-generated identifier for this installation. -### Can I see the information that is being sent? +## Can I see the information that is being sent? If Telemetry is enabled, you can go to `https:///v1-telemetry` in your installation to see the current data. If Telemetry is not enabled, the process that collects the data is not running, so there is nothing being collected to look at. -### How do I turn it on or off? +## How do I turn it on or off? After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. diff --git a/versioned_docs/version-2.9/faq/telemetry.md b/versioned_docs/version-2.9/faq/telemetry.md index edfcaebed4e7..64bcee4090f9 100644 --- a/versioned_docs/version-2.9/faq/telemetry.md +++ b/versioned_docs/version-2.9/faq/telemetry.md @@ -6,11 +6,11 @@ title: Telemetry FAQ -### What is Telemetry? +## What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. -### What information is collected? +## What information is collected? No specific identifying information like usernames, passwords, or the names or addresses of user resources will ever be collected. @@ -24,12 +24,12 @@ The primary things collected include: - The image name & version of Rancher that is running. - A unique randomly-generated identifier for this installation. -### Can I see the information that is being sent? +## Can I see the information that is being sent? If Telemetry is enabled, you can go to `https:///v1-telemetry` in your installation to see the current data. If Telemetry is not enabled, the process that collects the data is not running, so there is nothing being collected to look at. -### How do I turn it on or off? +## How do I turn it on or off? After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. 
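For scripted environments, the same setting can in principle be changed through the `v3` settings API rather than the UI. The request below is a rough sketch, not a documented procedure; the URL, token, and payload shape are assumptions to verify against your Rancher version:

```
# Sketch only: opt out of Telemetry by updating the telemetry-opt setting via the API.
curl -sk -u "token-abcde:examplekey" \
  -X PUT "https://rancher.yourdomain.com/v3/settings/telemetry-opt" \
  -H 'Content-Type: application/json' \
  -d '{"value": "out"}'
```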
From a2e9bb2af16d2ee7a0e9c3b3b430febd7eab1cb1 Mon Sep 17 00:00:00 2001 From: martyav Date: Fri, 13 Sep 2024 16:32:26 -0400 Subject: [PATCH 08/31] fixed upgrades.md --- .../install-upgrade-on-a-kubernetes-cluster/upgrades.md | 2 -- .../upgrades/upgrades.md | 3 --- .../install-rancher-on-linux/upgrade-rancherd.md | 6 ++---- .../install-upgrade-on-a-kubernetes-cluster/upgrades.md | 2 -- .../install-upgrade-on-a-kubernetes-cluster/upgrades.md | 3 --- .../install-upgrade-on-a-kubernetes-cluster/upgrades.md | 2 -- .../install-upgrade-on-a-kubernetes-cluster/upgrades.md | 3 --- 7 files changed, 2 insertions(+), 19 deletions(-) diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md index 6a5107aea052..e0db870bdc3a 100644 --- a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md +++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md @@ -49,7 +49,6 @@ For [air-gapped installs only,](../other-installation-methods/air-gapped-helm-cl Follow the steps to upgrade Rancher server: - ### 1. Back up Your Kubernetes Cluster that is Running Rancher Server Use the [backup application](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) to back up Rancher. @@ -119,7 +118,6 @@ If you are installing Rancher in an air-gapped environment, skip the rest of thi ::: - Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. ``` diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/upgrades.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/upgrades.md index 23f37dc8231c..248be8165113 100644 --- a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/upgrades.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/upgrades.md @@ -108,7 +108,6 @@ You'll use the backup as a restoration point if something goes wrong during upgr > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories](../../resources/choose-a-rancher-version.md#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. - 1. Fetch the latest chart to install Rancher from the Helm chart repository. This command will pull down the latest charts and save it in the current directory as a `.tgz` file. @@ -188,7 +187,6 @@ If you are currently running the cert-manager whose version is older than v0.11, --set hostname=rancher.my.org ``` - @@ -203,7 +201,6 @@ Placeholder | Description `` | The DNS name for your private registry. `` | Cert-manager version running on k8s cluster. 
- ### Option A: Default Self-signed Certificate ```plain diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/upgrade-rancherd.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/upgrade-rancherd.md index 3d32fff306c1..160dae4fb00a 100644 --- a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/upgrade-rancherd.md +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/upgrade-rancherd.md @@ -10,7 +10,7 @@ During a RancherD upgrade, there is very little downtime, but it is possible tha When Rancher is installed with RancherD, the underlying Kubernetes cluster can't be upgraded from the Rancher UI. It needs to be upgraded using the RancherD CLI. -### Upgrading the Rancher Helm Chart without Upgrading the Underlying Cluster +## Upgrading the Rancher Helm Chart without Upgrading the Underlying Cluster To upgrade Rancher without upgrading the underlying Kubernetes cluster, follow these steps. @@ -38,7 +38,7 @@ To upgrade Rancher without upgrading the underlying Kubernetes cluster, follow t If necessary, restore Rancher from backup by following [these steps.](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md) -### Upgrading Both Rancher and the Underlying Cluster +## Upgrading Both Rancher and the Underlying Cluster Upgrade both RancherD and the underlying Kubernetes cluster by re-running the RancherD installation script. @@ -65,5 +65,3 @@ systemctl start rancherd-server ``` The upgrade can also be performed by manually installing the binary of the desired version. - - diff --git a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md index 913df3d96c4f..ad9642cdf24d 100644 --- a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md +++ b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md @@ -12,7 +12,6 @@ For the instructions to upgrade Rancher installed with Docker, refer to [this pa To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services](https://rancher.com/docs/rke/latest/en/config-options/services/) or [add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE](https://rancher.com/docs/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. - ## Prerequisites ### Access to kubeconfig @@ -46,7 +45,6 @@ For [air-gapped installs only,](../other-installation-methods/air-gapped-helm-cl Follow the steps to upgrade Rancher server: - ### 1. Back up Your Kubernetes Cluster that is Running Rancher Server Use the [backup application](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) to back up Rancher. 
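Where the backup application is already installed, a one-off backup before upgrading can be triggered with a `Backup` resource. The snippet below is a minimal sketch; the resource name is illustrative and the fields follow the rancher-backup chart's default resource set:

```
# Create a one-time backup with the rancher-backup operator (name is illustrative).
cat <<'EOF' | kubectl apply -f -
apiVersion: resources.cattle.io/v1
kind: Backup
metadata:
  name: pre-upgrade-backup
spec:
  resourceSetName: rancher-resource-set
EOF

# Wait for the backup to report as completed before upgrading.
kubectl get backups.resources.cattle.io
```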
diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md index bc75b3a46808..1d54f7dc3d16 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md @@ -12,7 +12,6 @@ For the instructions to upgrade Rancher installed with Docker, refer to [this pa To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services](https://rancher.com/docs/rke/latest/en/config-options/services/) or [add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE](https://rancher.com/docs/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. - ## Prerequisites ### Access to kubeconfig @@ -49,7 +48,6 @@ For [air-gapped installs only,](../other-installation-methods/air-gapped-helm-cl Follow the steps to upgrade Rancher server: - ### 1. Back up Your Kubernetes Cluster that is Running Rancher Server Use the [backup application](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) to back up Rancher. @@ -119,7 +117,6 @@ If you are installing Rancher in an air-gapped environment, skip the rest of thi ::: - Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. ``` diff --git a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md index b629e768d548..843aabd3a8f4 100644 --- a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md +++ b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md @@ -12,7 +12,6 @@ For the instructions to upgrade Rancher installed with Docker, refer to [this pa To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services](https://rancher.com/docs/rke/latest/en/config-options/services/) or [add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE](https://rancher.com/docs/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. - ## Prerequisites ### Access to kubeconfig @@ -119,7 +118,6 @@ If you are installing Rancher in an air-gapped environment, skip the rest of thi ::: - Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. 
``` diff --git a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md index 6a5107aea052..4bdc983b4caf 100644 --- a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md +++ b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md @@ -12,7 +12,6 @@ For the instructions to upgrade Rancher installed with Docker, refer to [this pa To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services](https://rancher.com/docs/rke/latest/en/config-options/services/) or [add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE](https://rancher.com/docs/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. - ## Prerequisites ### Access to kubeconfig @@ -49,7 +48,6 @@ For [air-gapped installs only,](../other-installation-methods/air-gapped-helm-cl Follow the steps to upgrade Rancher server: - ### 1. Back up Your Kubernetes Cluster that is Running Rancher Server Use the [backup application](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) to back up Rancher. @@ -119,7 +117,6 @@ If you are installing Rancher in an air-gapped environment, skip the rest of thi ::: - Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. ``` From c6d25ea60f866e50b5393bc1a0b954b7ffdc69f9 Mon Sep 17 00:00:00 2001 From: martyav Date: Fri, 13 Sep 2024 16:51:18 -0400 Subject: [PATCH 09/31] fixed upgrade-kubernetes-without-upgrading-rancher.md --- .../upgrade-kubernetes-without-upgrading-rancher.md | 4 ++-- .../upgrade-kubernetes-without-upgrading-rancher.md | 5 +++-- .../upgrade-kubernetes-without-upgrading-rancher.md | 4 ++-- .../upgrade-kubernetes-without-upgrading-rancher.md | 4 ++-- .../upgrade-kubernetes-without-upgrading-rancher.md | 4 ++-- .../upgrade-kubernetes-without-upgrading-rancher.md | 4 ++-- 6 files changed, 13 insertions(+), 12 deletions(-) diff --git a/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md index a474f770b1b1..71b3a37204cd 100644 --- a/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md +++ b/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -36,7 +36,7 @@ Administrators might configure the RKE metadata settings to do the following: - Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub - Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher -### Refresh Kubernetes Metadata +## Refresh Kubernetes Metadata The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) @@ -74,7 +74,7 @@ If you don't have an air gap setup, you don't need 
to specify the URL where Ranc However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. -### Air Gap Setups +## Air Gap Setups Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md index 66308b50327f..ffe957ecb8e8 100644 --- a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -28,7 +28,7 @@ Administrators might configure the RKE metadata settings to do the following: - Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub - Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher -### Refresh Kubernetes Metadata +## Refresh Kubernetes Metadata The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) @@ -56,7 +56,8 @@ To edit the metadata config in Rancher, If you don't have an air gap setup, you don't need to specify the URL where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata/blob/dev-v2.5/data/data.json) However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. -### Air Gap Setups + +## Air Gap Setups Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. 
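In practice, mirroring the metadata might look like the sketch below; the source branch, web server, and paths are placeholders to adapt to your environment:

```
# Download the metadata JSON from the branch matching your Rancher version (placeholder shown).
curl -L -o data.json \
  https://raw.githubusercontent.com/rancher/kontainer-driver-metadata/dev-v2.5/data/data.json

# Host data.json on a web server the Rancher server nodes can reach, for example:
scp data.json admin@mirror.yourdomain.com:/var/www/html/kontainer-driver-metadata/data.json

# Then edit the rke-metadata-config setting so its url field points at
#   http://mirror.yourdomain.com/kontainer-driver-metadata/data.json
```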
For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) diff --git a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md index 89a3b7549f1a..d25a7f031967 100644 --- a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md +++ b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -36,7 +36,7 @@ Administrators might configure the RKE metadata settings to do the following: - Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub - Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher -### Refresh Kubernetes Metadata +## Refresh Kubernetes Metadata The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) @@ -74,7 +74,7 @@ If you don't have an air gap setup, you don't need to specify the URL where Ranc However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. -### Air Gap Setups +## Air Gap Setups Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. 
For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md index a474f770b1b1..71b3a37204cd 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -36,7 +36,7 @@ Administrators might configure the RKE metadata settings to do the following: - Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub - Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher -### Refresh Kubernetes Metadata +## Refresh Kubernetes Metadata The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) @@ -74,7 +74,7 @@ If you don't have an air gap setup, you don't need to specify the URL where Ranc However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. -### Air Gap Setups +## Air Gap Setups Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. 
For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) diff --git a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md index a474f770b1b1..71b3a37204cd 100644 --- a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md +++ b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -36,7 +36,7 @@ Administrators might configure the RKE metadata settings to do the following: - Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub - Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher -### Refresh Kubernetes Metadata +## Refresh Kubernetes Metadata The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) @@ -74,7 +74,7 @@ If you don't have an air gap setup, you don't need to specify the URL where Ranc However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. -### Air Gap Setups +## Air Gap Setups Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. 
For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) diff --git a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md index a474f770b1b1..71b3a37204cd 100644 --- a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md +++ b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -36,7 +36,7 @@ Administrators might configure the RKE metadata settings to do the following: - Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub - Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher -### Refresh Kubernetes Metadata +## Refresh Kubernetes Metadata The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) @@ -74,7 +74,7 @@ If you don't have an air gap setup, you don't need to specify the URL where Ranc However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. -### Air Gap Setups +## Air Gap Setups Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) From f26cce284ac1941f5360bd1626dd0191c3d082ab Mon Sep 17 00:00:00 2001 From: martyav Date: Mon, 16 Sep 2024 10:17:43 -0400 Subject: [PATCH 10/31] fixed air-gapped-upgrades.md --- .../air-gapped-upgrades.md | 2 +- .../air-gapped-upgrades.md | 2 +- .../air-gapped-upgrades.md | 2 +- .../air-gapped-upgrades.md | 2 +- .../air-gapped-upgrades.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md index b519ebf2761f..a3b48a0814d4 100644 --- a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md +++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md @@ -12,7 +12,7 @@ These instructions assume you have already followed the instructions for a Kuber ::: -### Rancher Helm Upgrade Options +## Rancher Helm Upgrade Options To upgrade with Helm, apply the same options that you used when installing Rancher. Refer to the reference table below to replace each placeholder. 
Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. diff --git a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md index f04a7df48b87..9ae9c1fa6a5d 100644 --- a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md +++ b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md @@ -12,7 +12,7 @@ These instructions assume you have already followed the instructions for a Kuber ::: -### Rancher Helm Upgrade Options +## Rancher Helm Upgrade Options To upgrade with Helm, apply the same options that you used when installing Rancher. Refer to the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md index 82957c75b8bf..b529207f9381 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md @@ -12,7 +12,7 @@ These instructions assume you have already followed the instructions for a Kuber ::: -### Rancher Helm Upgrade Options +## Rancher Helm Upgrade Options To upgrade with Helm, apply the same options that you used when installing Rancher. Refer to the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. diff --git a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md index 82957c75b8bf..b529207f9381 100644 --- a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md +++ b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md @@ -12,7 +12,7 @@ These instructions assume you have already followed the instructions for a Kuber ::: -### Rancher Helm Upgrade Options +## Rancher Helm Upgrade Options To upgrade with Helm, apply the same options that you used when installing Rancher. Refer to the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. 
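Put together, an air-gapped upgrade command typically resembles the sketch below. The chart file, namespace, hostname, and registry values are placeholders, and the `--set` keys shown are the ones commonly reused from a private-registry install rather than a complete list:

```
# Sketch only: reuse the options from the original install, substituting your own values.
helm upgrade rancher ./rancher-<VERSION>.tgz \
  --namespace cattle-system \
  --set hostname=rancher.yourdomain.com \
  --set rancherImage=<REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher \
  --set systemDefaultRegistry=<REGISTRY.YOURDOMAIN.COM:PORT> \
  --set useBundledSystemChart=true
```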
diff --git a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md index b519ebf2761f..a3b48a0814d4 100644 --- a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md +++ b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md @@ -12,7 +12,7 @@ These instructions assume you have already followed the instructions for a Kuber ::: -### Rancher Helm Upgrade Options +## Rancher Helm Upgrade Options To upgrade with Helm, apply the same options that you used when installing Rancher. Refer to the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. From 396c54fdaf279b0b4634bbc7ef146e2cc527faad Mon Sep 17 00:00:00 2001 From: martyav Date: Mon, 16 Sep 2024 10:34:57 -0400 Subject: [PATCH 11/31] fixed dockershim.md --- .../installation-requirements/dockershim.md | 2 +- .../installation-requirements/dockershim.md | 2 +- .../installation-requirements/dockershim.md | 2 +- .../installation-requirements/dockershim.md | 2 +- .../installation-requirements/dockershim.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/getting-started/installation-and-upgrade/installation-requirements/dockershim.md b/docs/getting-started/installation-and-upgrade/installation-requirements/dockershim.md index 067f7450143b..1578165e292d 100644 --- a/docs/getting-started/installation-and-upgrade/installation-requirements/dockershim.md +++ b/docs/getting-started/installation-and-upgrade/installation-requirements/dockershim.md @@ -22,7 +22,7 @@ Starting with version 1.24, the above defaults to true. For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher going forward. -### FAQ +## FAQ
diff --git a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/installation-requirements/dockershim.md b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/installation-requirements/dockershim.md index 067f7450143b..1578165e292d 100644 --- a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/installation-requirements/dockershim.md +++ b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/installation-requirements/dockershim.md @@ -22,7 +22,7 @@ Starting with version 1.24, the above defaults to true. For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher going forward. -### FAQ +## FAQ
diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/installation-requirements/dockershim.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/installation-requirements/dockershim.md index 067f7450143b..1578165e292d 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/installation-requirements/dockershim.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/installation-requirements/dockershim.md @@ -22,7 +22,7 @@ Starting with version 1.24, the above defaults to true. For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher going forward. -### FAQ +## FAQ
diff --git a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/dockershim.md b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/dockershim.md index 067f7450143b..1578165e292d 100644 --- a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/dockershim.md +++ b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/dockershim.md @@ -22,7 +22,7 @@ Starting with version 1.24, the above defaults to true. For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher going forward. -### FAQ +## FAQ
diff --git a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/dockershim.md b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/dockershim.md index 067f7450143b..1578165e292d 100644 --- a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/dockershim.md +++ b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/dockershim.md @@ -22,7 +22,7 @@ Starting with version 1.24, the above defaults to true. For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher going forward. -### FAQ +## FAQ
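A quick way to confirm which container runtime a cluster's nodes are actually using, for example after moving to containerd-based RKE2 or K3s, is to list the nodes:

```
# The CONTAINER-RUNTIME column reports containerd:// or docker:// per node.
kubectl get nodes -o wide

# Or print just the runtime version for each node:
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.nodeInfo.containerRuntimeVersion}{"\n"}{end}'
```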
From 25a5167f51b7e4e8bc6174f5a2701fab8d8d8dc8 Mon Sep 17 00:00:00 2001 From: martyav Date: Mon, 16 Sep 2024 10:38:25 -0400 Subject: [PATCH 12/31] fixed docker-install-commands.md --- .../docker-install-commands.md | 8 +++----- .../docker-install-commands.md | 11 ++++++----- .../docker-install-commands.md | 8 +++----- .../docker-install-commands.md | 2 -- .../docker-install-commands.md | 8 +++----- .../docker-install-commands.md | 6 +++--- 6 files changed, 18 insertions(+), 25 deletions(-) diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md index 53bbdc4e9cc7..e307e805b9eb 100644 --- a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md @@ -28,7 +28,7 @@ For security purposes, SSL (Secure Sockets Layer) is required when using Rancher Choose from the following options: -### Option A: Default Self-Signed Certificate +## Option A: Default Self-Signed Certificate
Click to expand @@ -55,7 +55,7 @@ docker run -d --restart=unless-stopped \
-### Option B: Bring Your Own Certificate: Self-Signed +## Option B: Bring Your Own Certificate: Self-Signed
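If the certificate and key this option expects still need to be created, a minimal self-signed pair can be generated as a starting point. The hostname and file names below are placeholders, and `-addext` requires OpenSSL 1.1.1 or newer:

```
# Generate a throwaway self-signed certificate (cert.pem) and key (key.pem) for testing.
openssl req -x509 -newkey rsa:4096 -sha256 -days 365 -nodes \
  -keyout key.pem -out cert.pem \
  -subj "/CN=rancher.yourdomain.com" \
  -addext "subjectAltName=DNS:rancher.yourdomain.com"
```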
Click to expand @@ -98,7 +98,7 @@ docker run -d --restart=unless-stopped \
-### Option C: Bring Your Own Certificate: Signed by Recognized CA +## Option C: Bring Your Own Certificate: Signed by Recognized CA
Click to expand @@ -143,8 +143,6 @@ docker run -d --restart=unless-stopped \
- - :::note If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md index e05cbdf1ac18..74948ff52bf8 100644 --- a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md @@ -26,7 +26,7 @@ For security purposes, SSL (Secure Sockets Layer) is required when using Rancher Choose from the following options: -### Option A: Default Self-Signed Certificate +## Option A: Default Self-Signed Certificate
Click to expand @@ -53,7 +53,7 @@ docker run -d --restart=unless-stopped \
-### Option B: Bring Your Own Certificate: Self-Signed +## Option B: Bring Your Own Certificate: Self-Signed
Click to expand @@ -93,7 +93,7 @@ docker run -d --restart=unless-stopped \
-### Option C: Bring Your Own Certificate: Signed by Recognized CA +## Option C: Bring Your Own Certificate: Signed by Recognized CA
Click to expand @@ -130,7 +130,8 @@ docker run -d --restart=unless-stopped \
+:::note +If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. -> **Note:** If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. - +::: diff --git a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md index 53bbdc4e9cc7..e307e805b9eb 100644 --- a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md +++ b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md @@ -28,7 +28,7 @@ For security purposes, SSL (Secure Sockets Layer) is required when using Rancher Choose from the following options: -### Option A: Default Self-Signed Certificate +## Option A: Default Self-Signed Certificate
Click to expand @@ -55,7 +55,7 @@ docker run -d --restart=unless-stopped \
-### Option B: Bring Your Own Certificate: Self-Signed +## Option B: Bring Your Own Certificate: Self-Signed
Click to expand @@ -98,7 +98,7 @@ docker run -d --restart=unless-stopped \
-### Option C: Bring Your Own Certificate: Signed by Recognized CA +## Option C: Bring Your Own Certificate: Signed by Recognized CA
Click to expand @@ -143,8 +143,6 @@ docker run -d --restart=unless-stopped \
- - :::note If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md index 53bbdc4e9cc7..fedb80ddeb26 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md @@ -143,8 +143,6 @@ docker run -d --restart=unless-stopped \ - - :::note If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. diff --git a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md index 53bbdc4e9cc7..e307e805b9eb 100644 --- a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md +++ b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md @@ -28,7 +28,7 @@ For security purposes, SSL (Secure Sockets Layer) is required when using Rancher Choose from the following options: -### Option A: Default Self-Signed Certificate +## Option A: Default Self-Signed Certificate
Click to expand @@ -55,7 +55,7 @@ docker run -d --restart=unless-stopped \
-### Option B: Bring Your Own Certificate: Self-Signed +## Option B: Bring Your Own Certificate: Self-Signed
Click to expand @@ -98,7 +98,7 @@ docker run -d --restart=unless-stopped \
-### Option C: Bring Your Own Certificate: Signed by Recognized CA +## Option C: Bring Your Own Certificate: Signed by Recognized CA
Click to expand @@ -143,8 +143,6 @@ docker run -d --restart=unless-stopped \
- - :::note If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. diff --git a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md index 53bbdc4e9cc7..664928623673 100644 --- a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md +++ b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md @@ -28,7 +28,7 @@ For security purposes, SSL (Secure Sockets Layer) is required when using Rancher Choose from the following options: -### Option A: Default Self-Signed Certificate +## Option A: Default Self-Signed Certificate
Click to expand @@ -55,7 +55,7 @@ docker run -d --restart=unless-stopped \
-### Option B: Bring Your Own Certificate: Self-Signed +## Option B: Bring Your Own Certificate: Self-Signed
Click to expand @@ -98,7 +98,7 @@ docker run -d --restart=unless-stopped \
-### Option C: Bring Your Own Certificate: Signed by Recognized CA +## Option C: Bring Your Own Certificate: Signed by Recognized CA
Click to expand From 62d364535df600d95253fe190bab9dfe51e62171 Mon Sep 17 00:00:00 2001 From: martyav Date: Mon, 16 Sep 2024 12:01:04 -0400 Subject: [PATCH 13/31] fixed install-kubernetes.md --- .../install-kubernetes.md | 40 ++++++++++--------- .../install-kubernetes.md | 26 ++++++------ .../install-kubernetes.md | 26 ++++++------ .../install-kubernetes.md | 40 ++++++++++--------- .../install-kubernetes.md | 36 +++++++++-------- .../infrastructure-private-registry.md | 28 ++++++------- .../install-kubernetes.md | 40 ++++++++++--------- .../install-kubernetes.md | 40 ++++++++++--------- 8 files changed, 150 insertions(+), 126 deletions(-) diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md index 2189e94d095b..bef60c2fe4ae 100644 --- a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md @@ -23,14 +23,15 @@ The steps to set up an air-gapped Kubernetes cluster on RKE, RKE2, or K3s are sh In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. -### Installation Outline +## Installation Outline 1. [Prepare Images Directory](#1-prepare-images-directory) 2. [Create Registry YAML](#2-create-registry-yaml) 3. [Install K3s](#3-install-k3s) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Prepare Images Directory +## 1. Prepare Images Directory + Obtain the images tar file for your architecture from the [releases](https://github.com/k3s-io/k3s/releases) page for the version of K3s you will be running. Place the tar file in the `images` directory before starting K3s on each node, for example: @@ -40,7 +41,8 @@ sudo mkdir -p /var/lib/rancher/k3s/agent/images/ sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ ``` -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. The registries.yaml file should look like this before plugging in the necessary information: @@ -66,7 +68,7 @@ Note, at this time only secure registries are supported with K3s (SSL with custo For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) -### 3. Install K3s +## 3. Install K3s Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [Rancher Support Matrix](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/). @@ -98,7 +100,7 @@ K3s additionally provides a `--resolv-conf` flag for kubelets, which may help wi ::: -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. 
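For example, copying the file from the first server node to a workstation and pointing it at a reachable address might look like this; the node hostname is a placeholder:

```
# Copy the kubeconfig to the path used in the examples below (node name is illustrative).
mkdir -p ~/.kube/config
scp root@k3s-server-1:/etc/rancher/k3s/k3s.yaml ~/.kube/config/k3s.yaml

# The file targets https://127.0.0.1:6443 by default; point it at a reachable server address.
sed -i 's/127\.0\.0\.1/k3s-server-1.yourdomain.com/g' ~/.kube/config/k3s.yaml
```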
@@ -138,7 +140,7 @@ kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -151,14 +153,15 @@ Upgrading an air-gap environment can be accomplished in the following manner: In this guide, we are assuming you have created your nodes in your air-gapped environment and have a secure Docker private registry on your bastion server. -### Installation Outline +## Installation Outline 1. [Create RKE2 configuration](#1-create-rke2-configuration) 2. [Create Registry YAML](#2-create-registry-yaml) 3. [Install RKE2](#3-install-rke2) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Create RKE2 configuration +## 1. Create RKE2 configuration + Create the config.yaml file at `/etc/rancher/rke2/config.yaml`. This will contain all the configuration options necessary to create a highly available RKE2 cluster. On the first server the minimum config is: @@ -186,7 +189,8 @@ RKE2 additionally provides a `resolv-conf` option for kubelets, which may help w ::: -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/rke2/registries.yaml`. This will tell RKE2 the necessary details to connect to your private registry. The registries.yaml file should look like this before plugging in the necessary information: @@ -210,7 +214,7 @@ configs: For more information on private registries configuration file for RKE2, refer to the [RKE2 documentation.](https://docs.rke2.io/install/containerd_registry_configuration) -### 3. Install RKE2 +## 3. Install RKE2 Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) @@ -239,7 +243,7 @@ systemctl start rke2-server.service For more information, refer to the [RKE2 documentation](https://docs.rke2.io/install/airgap). -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. @@ -279,7 +283,7 @@ kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -291,7 +295,7 @@ Upgrading an air-gap environment can be accomplished in the following manner: We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. -### 1. 
Install RKE +## 1. Install RKE Install RKE by following the instructions in the [RKE documentation.](https://rancher.com/docs/rke/latest/en/installation/) @@ -301,7 +305,7 @@ Certified version(s) of RKE based on the Rancher version can be found in the [Ra ::: -### 2. Create an RKE Config File +## 2. Create an RKE Config File From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. @@ -352,7 +356,7 @@ private_registries: is_default: true ``` -### 3. Run RKE +## 3. Run RKE After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: @@ -360,7 +364,7 @@ After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: rke up --config ./rancher-cluster.yml ``` -### 4. Save Your Files +## 4. Save Your Files :::note Important: @@ -383,8 +387,8 @@ The "rancher-cluster" parts of the two latter file names are dependent on how yo ::: -### Issues or errors? +## Issues or Errors? See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. -### [Next: Install Rancher](install-rancher-ha.md) +## [Next: Install Rancher](install-rancher-ha.md) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md index 6d588c9a933b..acc3b651d5af 100644 --- a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md @@ -21,14 +21,15 @@ The steps to set up an air-gapped Kubernetes cluster on RKE or K3s are shown bel In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. -### Installation Outline +## Installation Outline 1. [Prepare Images Directory](#1-prepare-images-directory) 2. [Create Registry YAML](#2-create-registry-yaml) 3. [Install K3s](#3-install-k3s) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Prepare Images Directory +## 1. Prepare Images Directory + Obtain the images tar file for your architecture from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be running. Place the tar file in the `images` directory before starting K3s on each node, for example: @@ -38,7 +39,8 @@ sudo mkdir -p /var/lib/rancher/k3s/agent/images/ sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ ``` -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. The registries.yaml file should look like this before plugging in the necessary information: @@ -64,7 +66,7 @@ Note, at this time only secure registries are supported with K3s (SSL with custo For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) -### 3. Install K3s +## 3. 
Install K3s Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) @@ -93,7 +95,7 @@ The node-token is on the server at `/var/lib/rancher/k3s/server/node-token` >**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. @@ -133,7 +135,7 @@ kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -146,11 +148,11 @@ Upgrading an air-gap environment can be accomplished in the following manner: We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. -### 1. Install RKE +## 1. Install RKE Install RKE by following the instructions in the [RKE documentation.](https://rancher.com/docs/rke/latest/en/installation/) -### 2. Create an RKE Config File +## 2. Create an RKE Config File From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. @@ -197,7 +199,7 @@ private_registries: is_default: true ``` -### 3. Run RKE +## 3. Run RKE After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: @@ -205,7 +207,7 @@ After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: rke up --config ./rancher-cluster.yml ``` -### 4. Save Your Files +## 4. Save Your Files > **Important** > The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. @@ -221,8 +223,8 @@ Save a copy of the following files in a secure location: > **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. -### Issues or errors? +## Issues or Errors? See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. 
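As a quick sanity check before moving on to installing Rancher, you can point `kubectl` at the kubeconfig that `rke up` generates. A minimal sketch, assuming you kept the default `rancher-cluster.yml` file name (adjust the `kube_config_` file name if you renamed the configuration file):

```bash
# Confirm the new RKE cluster is reachable with the generated kubeconfig.
kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes
kubectl --kubeconfig kube_config_rancher-cluster.yml get pods --all-namespaces
```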
-### [Next: Install Rancher](install-rancher-ha.md) +## [Next: Install Rancher](install-rancher-ha.md) diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md index 928858bfdca7..737089f301cf 100644 --- a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md @@ -19,14 +19,15 @@ The steps to set up an air-gapped Kubernetes cluster on RKE or K3s are shown bel In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. -### Installation Outline +## Installation Outline 1. [Prepare Images Directory](#1-prepare-images-directory) 2. [Create Registry YAML](#2-create-registry-yaml) 3. [Install K3s](#3-install-k3s) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Prepare Images Directory +## 1. Prepare Images Directory + Obtain the images tar file for your architecture from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be running. Place the tar file in the `images` directory before starting K3s on each node, for example: @@ -36,7 +37,8 @@ sudo mkdir -p /var/lib/rancher/k3s/agent/images/ sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ ``` -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. The registries.yaml file should look like this before plugging in the necessary information: @@ -62,7 +64,7 @@ Note, at this time only secure registries are supported with K3s (SSL with custo For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) -### 3. Install K3s +## 3. Install K3s Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) @@ -91,7 +93,7 @@ The node-token is on the server at `/var/lib/rancher/k3s/server/node-token` >**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. 
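One way to copy that file to a workstation and start using it is sketched below. The node address and the `rancher.example.com` hostname are placeholders, not values defined in this guide:

```bash
# Copy the kubeconfig off a K3s server node (adjust the user and host).
mkdir -p ~/.kube/config
scp root@k3s-server-1:/etc/rancher/k3s/k3s.yaml ~/.kube/config/k3s.yaml
chmod 600 ~/.kube/config/k3s.yaml

# k3s.yaml points at https://127.0.0.1:6443 by default; replace that with
# the DNS name of your load balancer.
sed -i 's/127\.0\.0\.1/rancher.example.com/' ~/.kube/config/k3s.yaml

kubectl --kubeconfig ~/.kube/config/k3s.yaml get nodes
```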
@@ -131,7 +133,7 @@ kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -144,11 +146,11 @@ Upgrading an air-gap environment can be accomplished in the following manner: We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. -### 1. Install RKE +## 1. Install RKE Install RKE by following the instructions in the [RKE documentation.](https://rancher.com/docs/rke/latest/en/installation/) -### 2. Create an RKE Config File +## 2. Create an RKE Config File From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. @@ -195,7 +197,7 @@ private_registries: is_default: true ``` -### 3. Run RKE +## 3. Run RKE After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: @@ -203,7 +205,7 @@ After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: rke up --config ./rancher-cluster.yml ``` -### 4. Save Your Files +## 4. Save Your Files > **Important** > The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. @@ -219,8 +221,8 @@ Save a copy of the following files in a secure location: > **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. -### Issues or errors? +## Issues or Errors? See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. -### [Next: Install Rancher](install-rancher-ha.md) +## [Next: Install Rancher](install-rancher-ha.md) diff --git a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md index 2189e94d095b..bef60c2fe4ae 100644 --- a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md +++ b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md @@ -23,14 +23,15 @@ The steps to set up an air-gapped Kubernetes cluster on RKE, RKE2, or K3s are sh In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. -### Installation Outline +## Installation Outline 1. [Prepare Images Directory](#1-prepare-images-directory) 2. [Create Registry YAML](#2-create-registry-yaml) 3. [Install K3s](#3-install-k3s) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Prepare Images Directory +## 1. 
Prepare Images Directory + Obtain the images tar file for your architecture from the [releases](https://github.com/k3s-io/k3s/releases) page for the version of K3s you will be running. Place the tar file in the `images` directory before starting K3s on each node, for example: @@ -40,7 +41,8 @@ sudo mkdir -p /var/lib/rancher/k3s/agent/images/ sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ ``` -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. The registries.yaml file should look like this before plugging in the necessary information: @@ -66,7 +68,7 @@ Note, at this time only secure registries are supported with K3s (SSL with custo For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) -### 3. Install K3s +## 3. Install K3s Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [Rancher Support Matrix](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/). @@ -98,7 +100,7 @@ K3s additionally provides a `--resolv-conf` flag for kubelets, which may help wi ::: -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. @@ -138,7 +140,7 @@ kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -151,14 +153,15 @@ Upgrading an air-gap environment can be accomplished in the following manner: In this guide, we are assuming you have created your nodes in your air-gapped environment and have a secure Docker private registry on your bastion server. -### Installation Outline +## Installation Outline 1. [Create RKE2 configuration](#1-create-rke2-configuration) 2. [Create Registry YAML](#2-create-registry-yaml) 3. [Install RKE2](#3-install-rke2) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Create RKE2 configuration +## 1. Create RKE2 configuration + Create the config.yaml file at `/etc/rancher/rke2/config.yaml`. This will contain all the configuration options necessary to create a highly available RKE2 cluster. On the first server the minimum config is: @@ -186,7 +189,8 @@ RKE2 additionally provides a `resolv-conf` option for kubelets, which may help w ::: -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/rke2/registries.yaml`. This will tell RKE2 the necessary details to connect to your private registry. 
The registries.yaml file should look like this before plugging in the necessary information: @@ -210,7 +214,7 @@ configs: For more information on private registries configuration file for RKE2, refer to the [RKE2 documentation.](https://docs.rke2.io/install/containerd_registry_configuration) -### 3. Install RKE2 +## 3. Install RKE2 Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) @@ -239,7 +243,7 @@ systemctl start rke2-server.service For more information, refer to the [RKE2 documentation](https://docs.rke2.io/install/airgap). -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. @@ -279,7 +283,7 @@ kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -291,7 +295,7 @@ Upgrading an air-gap environment can be accomplished in the following manner: We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. -### 1. Install RKE +## 1. Install RKE Install RKE by following the instructions in the [RKE documentation.](https://rancher.com/docs/rke/latest/en/installation/) @@ -301,7 +305,7 @@ Certified version(s) of RKE based on the Rancher version can be found in the [Ra ::: -### 2. Create an RKE Config File +## 2. Create an RKE Config File From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. @@ -352,7 +356,7 @@ private_registries: is_default: true ``` -### 3. Run RKE +## 3. Run RKE After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: @@ -360,7 +364,7 @@ After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: rke up --config ./rancher-cluster.yml ``` -### 4. Save Your Files +## 4. Save Your Files :::note Important: @@ -383,8 +387,8 @@ The "rancher-cluster" parts of the two latter file names are dependent on how yo ::: -### Issues or errors? +## Issues or Errors? See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. 
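Because these files are needed for every future upgrade or disaster recovery, consider archiving them together off the build host as well. A small sketch, assuming the file names RKE produces for the default `rancher-cluster.yml` configuration:

```bash
# Bundle the RKE cluster state files for safe keeping.
tar czf rancher-cluster-state-$(date +%F).tar.gz \
  rancher-cluster.yml \
  kube_config_rancher-cluster.yml \
  rancher-cluster.rkestate
```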
-### [Next: Install Rancher](install-rancher-ha.md) +## [Next: Install Rancher](install-rancher-ha.md) diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md index 2189e94d095b..aaac53cc0f95 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md @@ -30,7 +30,8 @@ In this guide, we are assuming you have created your nodes in your air gapped en 3. [Install K3s](#3-install-k3s) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Prepare Images Directory +## 1. Prepare Images Directory + Obtain the images tar file for your architecture from the [releases](https://github.com/k3s-io/k3s/releases) page for the version of K3s you will be running. Place the tar file in the `images` directory before starting K3s on each node, for example: @@ -40,7 +41,8 @@ sudo mkdir -p /var/lib/rancher/k3s/agent/images/ sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ ``` -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. The registries.yaml file should look like this before plugging in the necessary information: @@ -66,7 +68,7 @@ Note, at this time only secure registries are supported with K3s (SSL with custo For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) -### 3. Install K3s +## 3. Install K3s Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [Rancher Support Matrix](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/). @@ -98,7 +100,7 @@ K3s additionally provides a `--resolv-conf` flag for kubelets, which may help wi ::: -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. @@ -138,7 +140,7 @@ kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -151,14 +153,15 @@ Upgrading an air-gap environment can be accomplished in the following manner: In this guide, we are assuming you have created your nodes in your air-gapped environment and have a secure Docker private registry on your bastion server. 
-### Installation Outline +## Installation Outline 1. [Create RKE2 configuration](#1-create-rke2-configuration) 2. [Create Registry YAML](#2-create-registry-yaml) 3. [Install RKE2](#3-install-rke2) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Create RKE2 configuration +## 1. Create RKE2 configuration + Create the config.yaml file at `/etc/rancher/rke2/config.yaml`. This will contain all the configuration options necessary to create a highly available RKE2 cluster. On the first server the minimum config is: @@ -186,7 +189,8 @@ RKE2 additionally provides a `resolv-conf` option for kubelets, which may help w ::: -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/rke2/registries.yaml`. This will tell RKE2 the necessary details to connect to your private registry. The registries.yaml file should look like this before plugging in the necessary information: @@ -210,7 +214,7 @@ configs: For more information on private registries configuration file for RKE2, refer to the [RKE2 documentation.](https://docs.rke2.io/install/containerd_registry_configuration) -### 3. Install RKE2 +## 3. Install RKE2 Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) @@ -239,7 +243,7 @@ systemctl start rke2-server.service For more information, refer to the [RKE2 documentation](https://docs.rke2.io/install/airgap). -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. @@ -279,7 +283,7 @@ kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -301,7 +305,7 @@ Certified version(s) of RKE based on the Rancher version can be found in the [Ra ::: -### 2. Create an RKE Config File +## 2. Create an RKE Config File From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. @@ -352,7 +356,7 @@ private_registries: is_default: true ``` -### 3. Run RKE +## 3. Run RKE After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: @@ -360,7 +364,7 @@ After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: rke up --config ./rancher-cluster.yml ``` -### 4. Save Your Files +## 4. Save Your Files :::note Important: @@ -383,8 +387,8 @@ The "rancher-cluster" parts of the two latter file names are dependent on how yo ::: -### Issues or errors? +## Issues or Errors? See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. 
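While troubleshooting, it is also worth confirming that the RKE CLI on your workstation is one of the certified versions noted above before re-running `rke up`:

```bash
# Print the installed RKE CLI version and compare it against the
# Rancher Support Matrix entry for your Rancher release.
rke --version
```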
-### [Next: Install Rancher](install-rancher-ha.md) +## [Next: Install Rancher](install-rancher-ha.md) diff --git a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md index 07b6b01097f7..aa70595d1af5 100644 --- a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md +++ b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md @@ -25,7 +25,7 @@ We recommend setting up the following infrastructure for a high-availability ins - **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - **A private image registry** to distribute container images to your machines. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -33,7 +33,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up External Datastore +## 2. Set up External Datastore The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. @@ -49,7 +49,7 @@ For an example of one way to set up the database, refer to this [tutorial](../.. For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) -### 3. Set up the Load Balancer +## 3. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -72,7 +72,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance ::: -### 4. Set up the DNS Record +## 4. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -82,7 +82,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 5. Set up a Private Image Registry +## 5. Set up a Private Image Registry Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing container images to your machines. @@ -106,13 +106,13 @@ To install the Rancher management server on a high-availability RKE cluster, we These nodes must be in the same region/data center. 
You may place these servers in separate availability zones. -### Why three nodes? +## Why Three Nodes? In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -120,7 +120,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up the Load Balancer +## 2. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -143,7 +143,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance ::: -### 3. Set up the DNS Record +## 3. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -153,7 +153,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 4. Set up a Private Image Registry +## 4. Set up a Private Image Registry Rancher supports air gap installs using a secure private registry. You must have your own private registry or other means of distributing container images to your machines. @@ -168,7 +168,7 @@ If you need to create a private registry, refer to the documentation pages for y -:::note Notes: +:::note - The Docker installation is for Rancher users that are wanting to test out Rancher. Since there is only one node and a single Docker container, if the node goes down, you will lose all the data of your Rancher server. @@ -176,7 +176,7 @@ If you need to create a private registry, refer to the documentation pages for y ::: -### 1. Set up a Linux Node +## 1. Set up a Linux Node This host will be disconnected from the Internet, but needs to be able to connect to your private registry. @@ -184,7 +184,7 @@ Make sure that your node fulfills the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up a Private Docker Registry +## 2. Set up a Private Docker Registry Rancher supports air gap installs using a private registry on your bastion server. You must have your own private registry or other means of distributing container images to your machines. 
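If you do not already operate a registry, a minimal self-hosted one on the bastion server might look like the sketch below. The port, volume paths, and certificate locations are illustrative assumptions, and any OCI-compliant registry works equally well:

```bash
# Minimal TLS-enabled registry using the registry:2 image.
# Assumes a certificate and key already exist under /opt/registry/certs.
docker run -d --restart=always --name private-registry \
  -p 443:443 \
  -v /opt/registry/certs:/certs \
  -v /opt/registry/data:/var/lib/registry \
  -e REGISTRY_HTTP_ADDR=0.0.0.0:443 \
  -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/registry.crt \
  -e REGISTRY_HTTP_TLS_KEY=/certs/registry.key \
  registry:2
```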
@@ -193,4 +193,4 @@ If you need help with creating a private registry, please refer to the [official -### [Next: Collect and Publish Images to your Private Registry](publish-images.md) +## [Next: Collect and Publish Images to your Private Registry](publish-images.md) diff --git a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md index 2189e94d095b..bef60c2fe4ae 100644 --- a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md +++ b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md @@ -23,14 +23,15 @@ The steps to set up an air-gapped Kubernetes cluster on RKE, RKE2, or K3s are sh In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. -### Installation Outline +## Installation Outline 1. [Prepare Images Directory](#1-prepare-images-directory) 2. [Create Registry YAML](#2-create-registry-yaml) 3. [Install K3s](#3-install-k3s) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Prepare Images Directory +## 1. Prepare Images Directory + Obtain the images tar file for your architecture from the [releases](https://github.com/k3s-io/k3s/releases) page for the version of K3s you will be running. Place the tar file in the `images` directory before starting K3s on each node, for example: @@ -40,7 +41,8 @@ sudo mkdir -p /var/lib/rancher/k3s/agent/images/ sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ ``` -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. The registries.yaml file should look like this before plugging in the necessary information: @@ -66,7 +68,7 @@ Note, at this time only secure registries are supported with K3s (SSL with custo For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) -### 3. Install K3s +## 3. Install K3s Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [Rancher Support Matrix](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/). @@ -98,7 +100,7 @@ K3s additionally provides a `--resolv-conf` flag for kubelets, which may help wi ::: -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. 
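While you are still working on the server node itself, the file can be used in place. A brief sketch; the `k3s kubectl` wrapper ships with K3s, and the file is root-owned by default:

```bash
# K3s bundles its own kubectl and, as root, reads /etc/rancher/k3s/k3s.yaml automatically.
sudo k3s kubectl get nodes -o wide

# A standalone kubectl on the same node can read the same file.
sudo KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl get pods --all-namespaces
```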
@@ -138,7 +140,7 @@ kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -151,14 +153,15 @@ Upgrading an air-gap environment can be accomplished in the following manner: In this guide, we are assuming you have created your nodes in your air-gapped environment and have a secure Docker private registry on your bastion server. -### Installation Outline +## Installation Outline 1. [Create RKE2 configuration](#1-create-rke2-configuration) 2. [Create Registry YAML](#2-create-registry-yaml) 3. [Install RKE2](#3-install-rke2) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Create RKE2 configuration +## 1. Create RKE2 configuration + Create the config.yaml file at `/etc/rancher/rke2/config.yaml`. This will contain all the configuration options necessary to create a highly available RKE2 cluster. On the first server the minimum config is: @@ -186,7 +189,8 @@ RKE2 additionally provides a `resolv-conf` option for kubelets, which may help w ::: -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/rke2/registries.yaml`. This will tell RKE2 the necessary details to connect to your private registry. The registries.yaml file should look like this before plugging in the necessary information: @@ -210,7 +214,7 @@ configs: For more information on private registries configuration file for RKE2, refer to the [RKE2 documentation.](https://docs.rke2.io/install/containerd_registry_configuration) -### 3. Install RKE2 +## 3. Install RKE2 Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) @@ -239,7 +243,7 @@ systemctl start rke2-server.service For more information, refer to the [RKE2 documentation](https://docs.rke2.io/install/airgap). -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. @@ -279,7 +283,7 @@ kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -291,7 +295,7 @@ Upgrading an air-gap environment can be accomplished in the following manner: We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. -### 1. 
Install RKE +## 1. Install RKE Install RKE by following the instructions in the [RKE documentation.](https://rancher.com/docs/rke/latest/en/installation/) @@ -301,7 +305,7 @@ Certified version(s) of RKE based on the Rancher version can be found in the [Ra ::: -### 2. Create an RKE Config File +## 2. Create an RKE Config File From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. @@ -352,7 +356,7 @@ private_registries: is_default: true ``` -### 3. Run RKE +## 3. Run RKE After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: @@ -360,7 +364,7 @@ After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: rke up --config ./rancher-cluster.yml ``` -### 4. Save Your Files +## 4. Save Your Files :::note Important: @@ -383,8 +387,8 @@ The "rancher-cluster" parts of the two latter file names are dependent on how yo ::: -### Issues or errors? +## Issues or Errors? See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. -### [Next: Install Rancher](install-rancher-ha.md) +## [Next: Install Rancher](install-rancher-ha.md) diff --git a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md index 2189e94d095b..bef60c2fe4ae 100644 --- a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md +++ b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md @@ -23,14 +23,15 @@ The steps to set up an air-gapped Kubernetes cluster on RKE, RKE2, or K3s are sh In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. -### Installation Outline +## Installation Outline 1. [Prepare Images Directory](#1-prepare-images-directory) 2. [Create Registry YAML](#2-create-registry-yaml) 3. [Install K3s](#3-install-k3s) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Prepare Images Directory +## 1. Prepare Images Directory + Obtain the images tar file for your architecture from the [releases](https://github.com/k3s-io/k3s/releases) page for the version of K3s you will be running. Place the tar file in the `images` directory before starting K3s on each node, for example: @@ -40,7 +41,8 @@ sudo mkdir -p /var/lib/rancher/k3s/agent/images/ sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ ``` -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. The registries.yaml file should look like this before plugging in the necessary information: @@ -66,7 +68,7 @@ Note, at this time only secure registries are supported with K3s (SSL with custo For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) -### 3. Install K3s +## 3. 
Install K3s Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [Rancher Support Matrix](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/). @@ -98,7 +100,7 @@ K3s additionally provides a `--resolv-conf` flag for kubelets, which may help wi ::: -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. @@ -138,7 +140,7 @@ kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -151,14 +153,15 @@ Upgrading an air-gap environment can be accomplished in the following manner: In this guide, we are assuming you have created your nodes in your air-gapped environment and have a secure Docker private registry on your bastion server. -### Installation Outline +## Installation Outline 1. [Create RKE2 configuration](#1-create-rke2-configuration) 2. [Create Registry YAML](#2-create-registry-yaml) 3. [Install RKE2](#3-install-rke2) 4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) -### 1. Create RKE2 configuration +## 1. Create RKE2 configuration + Create the config.yaml file at `/etc/rancher/rke2/config.yaml`. This will contain all the configuration options necessary to create a highly available RKE2 cluster. On the first server the minimum config is: @@ -186,7 +189,8 @@ RKE2 additionally provides a `resolv-conf` option for kubelets, which may help w ::: -### 2. Create Registry YAML +## 2. Create Registry YAML + Create the registries.yaml file at `/etc/rancher/rke2/registries.yaml`. This will tell RKE2 the necessary details to connect to your private registry. The registries.yaml file should look like this before plugging in the necessary information: @@ -210,7 +214,7 @@ configs: For more information on private registries configuration file for RKE2, refer to the [RKE2 documentation.](https://docs.rke2.io/install/containerd_registry_configuration) -### 3. Install RKE2 +## 3. Install RKE2 Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) @@ -239,7 +243,7 @@ systemctl start rke2-server.service For more information, refer to the [RKE2 documentation](https://docs.rke2.io/install/airgap). -### 4. Save and Start Using the kubeconfig File +## 4. Save and Start Using the kubeconfig File When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. 
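While you are still on an RKE2 server node, you can use the bundled `kubectl` with this file. A brief sketch; the binary path below is the default RKE2 install location:

```bash
# RKE2 ships kubectl under /var/lib/rancher/rke2/bin; rke2.yaml is root-owned by default.
sudo KUBECONFIG=/etc/rancher/rke2/rke2.yaml \
  /var/lib/rancher/rke2/bin/kubectl get nodes -o wide
```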
@@ -279,7 +283,7 @@ kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. -### Note on Upgrading +## Note on Upgrading Upgrading an air-gap environment can be accomplished in the following manner: @@ -291,7 +295,7 @@ Upgrading an air-gap environment can be accomplished in the following manner: We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. -### 1. Install RKE +## 1. Install RKE Install RKE by following the instructions in the [RKE documentation.](https://rancher.com/docs/rke/latest/en/installation/) @@ -301,7 +305,7 @@ Certified version(s) of RKE based on the Rancher version can be found in the [Ra ::: -### 2. Create an RKE Config File +## 2. Create an RKE Config File From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. @@ -352,7 +356,7 @@ private_registries: is_default: true ``` -### 3. Run RKE +## 3. Run RKE After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: @@ -360,7 +364,7 @@ After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: rke up --config ./rancher-cluster.yml ``` -### 4. Save Your Files +## 4. Save Your Files :::note Important: @@ -383,8 +387,8 @@ The "rancher-cluster" parts of the two latter file names are dependent on how yo ::: -### Issues or errors? +## Issues or Errors? See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. -### [Next: Install Rancher](install-rancher-ha.md) +## [Next: Install Rancher](install-rancher-ha.md) From 81b05018e15c95217b783f11d5830b5bc6f75ece Mon Sep 17 00:00:00 2001 From: martyav Date: Mon, 16 Sep 2024 12:19:12 -0400 Subject: [PATCH 14/31] fixed infrastructure-private-registry.md --- .../infrastructure-private-registry.md | 26 +++++++++---------- .../infrastructure-private-registry.md | 26 +++++++++---------- .../infrastructure-private-registry.md | 26 +++++++++---------- .../infrastructure-private-registry.md | 26 +++++++++---------- .../infrastructure-private-registry.md | 26 +++++++++---------- .../infrastructure-private-registry.md | 26 +++++++++---------- 6 files changed, 78 insertions(+), 78 deletions(-) diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md index 07b6b01097f7..40e21d3186aa 100644 --- a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md @@ -25,7 +25,7 @@ We recommend setting up the following infrastructure for a high-availability ins - **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. 
- **A private image registry** to distribute container images to your machines. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -33,7 +33,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up External Datastore +## 2. Set up External Datastore The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. @@ -49,7 +49,7 @@ For an example of one way to set up the database, refer to this [tutorial](../.. For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) -### 3. Set up the Load Balancer +## 3. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -72,7 +72,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance ::: -### 4. Set up the DNS Record +## 4. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -82,7 +82,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 5. Set up a Private Image Registry +## 5. Set up a Private Image Registry Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing container images to your machines. @@ -106,13 +106,13 @@ To install the Rancher management server on a high-availability RKE cluster, we These nodes must be in the same region/data center. You may place these servers in separate availability zones. -### Why three nodes? +## Why Three Nodes? In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. 
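A quick way to confirm that connectivity from each node is sketched below. `registry.example.com` is a placeholder for your own registry hostname, and the check assumes the registry's CA certificate is already trusted on the node (add `--cacert <path>` otherwise):

```bash
# The registry's /v2/ endpoint should respond; an HTTP 200 or 401 status
# both indicate the registry is reachable over TLS from this node.
curl -s -o /dev/null -w '%{http_code}\n' https://registry.example.com/v2/
```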
@@ -120,7 +120,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up the Load Balancer +## 2. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -143,7 +143,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance ::: -### 3. Set up the DNS Record +## 3. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -153,7 +153,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 4. Set up a Private Image Registry +## 4. Set up a Private Image Registry Rancher supports air gap installs using a secure private registry. You must have your own private registry or other means of distributing container images to your machines. @@ -176,7 +176,7 @@ If you need to create a private registry, refer to the documentation pages for y ::: -### 1. Set up a Linux Node +## 1. Set up a Linux Node This host will be disconnected from the Internet, but needs to be able to connect to your private registry. @@ -184,7 +184,7 @@ Make sure that your node fulfills the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up a Private Docker Registry +## 2. Set up a Private Docker Registry Rancher supports air gap installs using a private registry on your bastion server. You must have your own private registry or other means of distributing container images to your machines. @@ -193,4 +193,4 @@ If you need help with creating a private registry, please refer to the [official -### [Next: Collect and Publish Images to your Private Registry](publish-images.md) +## [Next: Collect and Publish Images to your Private Registry](publish-images.md) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md index f5cebd0477dd..ee7b4616c321 100644 --- a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md @@ -23,7 +23,7 @@ We recommend setting up the following infrastructure for a high-availability ins - **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. 
- **A private Docker registry** to distribute Docker images to your machines. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -31,7 +31,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up External Datastore +## 2. Set up External Datastore The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. @@ -47,7 +47,7 @@ For an example of one way to set up the database, refer to this [tutorial](../.. For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) -### 3. Set up the Load Balancer +## 3. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -67,7 +67,7 @@ For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to > **Important:** > Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. -### 4. Set up the DNS Record +## 4. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -77,7 +77,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 5. Set up a Private Docker Registry +## 5. Set up a Private Docker Registry Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. @@ -97,13 +97,13 @@ To install the Rancher management server on a high-availability RKE cluster, we These nodes must be in the same region/data center. You may place these servers in separate availability zones. -### Why three nodes? +## Why Three Nodes? In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. -### 1. Set up Linux Nodes +## 1. 
Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -111,7 +111,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up the Load Balancer +## 2. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -131,7 +131,7 @@ For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to > **Important:** > Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. -### 3. Set up the DNS Record +## 3. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -141,7 +141,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 4. Set up a Private Docker Registry +## 4. Set up a Private Docker Registry Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. @@ -157,7 +157,7 @@ If you need help with creating a private registry, please refer to the [official > > For Rancher v2.0-v2.4, there is no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. -### 1. Set up a Linux Node +## 1. Set up a Linux Node This host will be disconnected from the Internet, but needs to be able to connect to your private registry. @@ -165,7 +165,7 @@ Make sure that your node fulfills the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up a Private Docker Registry +## 2. Set up a Private Docker Registry Rancher supports air gap installs using a Docker private registry on your bastion server. You must have your own private registry or other means of distributing Docker images to your machines. 
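If you do not already have a registry on the bastion server, a minimal, unauthenticated registry can be started from the upstream `registry:2` image for testing purposes; a production registry should be secured with TLS and authentication. The host path and port below are illustrative only:

```plain
# Stores registry data under /opt/registry on the bastion host; 5000 is the registry's default port.
docker run -d --restart=always --name registry \
  -v /opt/registry:/var/lib/registry \
  -p 5000:5000 \
  registry:2
```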
@@ -174,4 +174,4 @@ If you need help with creating a private registry, please refer to the [official -### [Next: Collect and Publish Images to your Private Registry](publish-images.md) +## [Next: Collect and Publish Images to your Private Registry](publish-images.md) diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md index af2944f70929..eab3b9d21f04 100644 --- a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md @@ -25,7 +25,7 @@ We recommend setting up the following infrastructure for a high-availability ins - **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - **A private Docker registry** to distribute Docker images to your machines. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -33,7 +33,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up External Datastore +## 2. Set up External Datastore The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. @@ -49,7 +49,7 @@ For an example of one way to set up the database, refer to this [tutorial](../.. For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) -### 3. Set up the Load Balancer +## 3. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -69,7 +69,7 @@ For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to > **Important:** > Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. -### 4. Set up the DNS Record +## 4. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. 
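As a sanity check, confirm that the DNS record resolves to the load balancer before continuing. `rancher.yourdomain.com` below is a placeholder for the hostname you chose:

```plain
dig +short rancher.yourdomain.com
# or, if dig is not available:
nslookup rancher.yourdomain.com
```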
@@ -79,7 +79,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 5. Set up a Private Docker Registry +## 5. Set up a Private Docker Registry Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. @@ -99,13 +99,13 @@ To install the Rancher management server on a high-availability RKE cluster, we These nodes must be in the same region/data center. You may place these servers in separate availability zones. -### Why three nodes? +## Why Three Nodes? In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -113,7 +113,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up the Load Balancer +## 2. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -133,7 +133,7 @@ For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to > **Important:** > Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. -### 3. Set up the DNS Record +## 3. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -143,7 +143,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 4. Set up a Private Docker Registry +## 4. Set up a Private Docker Registry Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. 
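As a rough sketch, images staged on a machine with internet access can be loaded and pushed to the private registry as follows. The archive name and image tag here are illustrative assumptions, not values prescribed by this guide:

```plain
# <REGISTRY.YOURDOMAIN.COM:PORT> is a placeholder for your private registry.
docker load --input rancher-images.tar.gz
docker tag rancher/rancher-agent:v2.5.8 <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher-agent:v2.5.8
docker push <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher-agent:v2.5.8
```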
@@ -159,7 +159,7 @@ If you need help with creating a private registry, please refer to the [official > > As of Rancher v2.5, the Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) -### 1. Set up a Linux Node +## 1. Set up a Linux Node This host will be disconnected from the Internet, but needs to be able to connect to your private registry. @@ -167,7 +167,7 @@ Make sure that your node fulfills the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up a Private Docker Registry +## 2. Set up a Private Docker Registry Rancher supports air gap installs using a Docker private registry on your bastion server. You must have your own private registry or other means of distributing Docker images to your machines. @@ -176,4 +176,4 @@ If you need help with creating a private registry, please refer to the [official -### [Next: Collect and Publish Images to your Private Registry](publish-images.md) +## [Next: Collect and Publish Images to your Private Registry](publish-images.md) diff --git a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md index 53dd8408f5cb..ab2932ed8423 100644 --- a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md +++ b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md @@ -25,7 +25,7 @@ We recommend setting up the following infrastructure for a high-availability ins - **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - **A private Docker registry** to distribute Docker images to your machines. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -33,7 +33,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up External Datastore +## 2. Set up External Datastore The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. @@ -49,7 +49,7 @@ For an example of one way to set up the database, refer to this [tutorial](../.. 
For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) -### 3. Set up the Load Balancer +## 3. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -72,7 +72,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance ::: -### 4. Set up the DNS Record +## 4. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -82,7 +82,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 5. Set up a Private Docker Registry +## 5. Set up a Private Docker Registry Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. @@ -102,13 +102,13 @@ To install the Rancher management server on a high-availability RKE cluster, we These nodes must be in the same region/data center. You may place these servers in separate availability zones. -### Why three nodes? +## Why Three Nodes? In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -116,7 +116,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up the Load Balancer +## 2. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -139,7 +139,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance ::: -### 3. Set up the DNS Record +## 3. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -149,7 +149,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 4. 
Set up a Private Docker Registry +## 4. Set up a Private Docker Registry Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. @@ -168,7 +168,7 @@ If you need help with creating a private registry, please refer to the [official ::: -### 1. Set up a Linux Node +## 1. Set up a Linux Node This host will be disconnected from the Internet, but needs to be able to connect to your private registry. @@ -176,7 +176,7 @@ Make sure that your node fulfills the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up a Private Docker Registry +## 2. Set up a Private Docker Registry Rancher supports air gap installs using a Docker private registry on your bastion server. You must have your own private registry or other means of distributing Docker images to your machines. @@ -185,4 +185,4 @@ If you need help with creating a private registry, please refer to the [official -### [Next: Collect and Publish Images to your Private Registry](publish-images.md) +## [Next: Collect and Publish Images to your Private Registry](publish-images.md) diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md index 07b6b01097f7..40e21d3186aa 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md @@ -25,7 +25,7 @@ We recommend setting up the following infrastructure for a high-availability ins - **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - **A private image registry** to distribute container images to your machines. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -33,7 +33,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up External Datastore +## 2. Set up External Datastore The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. @@ -49,7 +49,7 @@ For an example of one way to set up the database, refer to this [tutorial](../.. For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) -### 3. Set up the Load Balancer +## 3. 
Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -72,7 +72,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance ::: -### 4. Set up the DNS Record +## 4. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -82,7 +82,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 5. Set up a Private Image Registry +## 5. Set up a Private Image Registry Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing container images to your machines. @@ -106,13 +106,13 @@ To install the Rancher management server on a high-availability RKE cluster, we These nodes must be in the same region/data center. You may place these servers in separate availability zones. -### Why three nodes? +## Why Three Nodes? In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -120,7 +120,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up the Load Balancer +## 2. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -143,7 +143,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance ::: -### 3. Set up the DNS Record +## 3. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -153,7 +153,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 4. Set up a Private Image Registry +## 4. Set up a Private Image Registry Rancher supports air gap installs using a secure private registry. You must have your own private registry or other means of distributing container images to your machines. 
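If the registry requires authentication, the machines pulling or pushing images need credentials for it. A quick check that a secure registry is reachable and accepts those credentials might look like the following; the hostname, user, password, and repository name are placeholders:

```plain
docker login <REGISTRY.YOURDOMAIN.COM:PORT>
curl -u <USER>:<PASSWORD> https://<REGISTRY.YOURDOMAIN.COM:PORT>/v2/rancher/rancher/tags/list
```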
@@ -176,7 +176,7 @@ If you need to create a private registry, refer to the documentation pages for y ::: -### 1. Set up a Linux Node +## 1. Set up a Linux Node This host will be disconnected from the Internet, but needs to be able to connect to your private registry. @@ -184,7 +184,7 @@ Make sure that your node fulfills the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up a Private Docker Registry +## 2. Set up a Private Docker Registry Rancher supports air gap installs using a private registry on your bastion server. You must have your own private registry or other means of distributing container images to your machines. @@ -193,4 +193,4 @@ If you need help with creating a private registry, please refer to the [official -### [Next: Collect and Publish Images to your Private Registry](publish-images.md) +## [Next: Collect and Publish Images to your Private Registry](publish-images.md) diff --git a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md index 07b6b01097f7..40e21d3186aa 100644 --- a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md +++ b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md @@ -25,7 +25,7 @@ We recommend setting up the following infrastructure for a high-availability ins - **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - **A private image registry** to distribute container images to your machines. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -33,7 +33,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up External Datastore +## 2. Set up External Datastore The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. @@ -49,7 +49,7 @@ For an example of one way to set up the database, refer to this [tutorial](../.. For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) -### 3. Set up the Load Balancer +## 3. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. 
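One common way to satisfy this requirement is a layer-4 (TCP) load balancer such as NGINX built with the stream module, passing ports 80 and 443 straight through to the nodes. The sketch below is only an example under those assumptions; the IP addresses are placeholders for your own node addresses, and any equivalent layer-4 load balancer works:

```plain
# Example /etc/nginx/nginx.conf for TCP pass-through (requires NGINX with the stream module).
worker_processes 4;
events {
    worker_connections 8192;
}
stream {
    upstream rancher_servers_http {
        least_conn;
        server 10.0.0.1:80 max_fails=3 fail_timeout=5s;
        server 10.0.0.2:80 max_fails=3 fail_timeout=5s;
        server 10.0.0.3:80 max_fails=3 fail_timeout=5s;
    }
    server {
        listen 80;
        proxy_pass rancher_servers_http;
    }
    upstream rancher_servers_https {
        least_conn;
        server 10.0.0.1:443 max_fails=3 fail_timeout=5s;
        server 10.0.0.2:443 max_fails=3 fail_timeout=5s;
        server 10.0.0.3:443 max_fails=3 fail_timeout=5s;
    }
    server {
        listen 443;
        proxy_pass rancher_servers_https;
    }
}
```

Because traffic is passed through at layer 4, TLS is still terminated on the nodes (or by Rancher's ingress), which keeps the certificate configuration described later unchanged.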
@@ -72,7 +72,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance ::: -### 4. Set up the DNS Record +## 4. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -82,7 +82,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 5. Set up a Private Image Registry +## 5. Set up a Private Image Registry Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing container images to your machines. @@ -106,13 +106,13 @@ To install the Rancher management server on a high-availability RKE cluster, we These nodes must be in the same region/data center. You may place these servers in separate availability zones. -### Why three nodes? +## Why Three Nodes? In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. -### 1. Set up Linux Nodes +## 1. Set up Linux Nodes These hosts will be disconnected from the internet, but require being able to connect with your private registry. @@ -120,7 +120,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up the Load Balancer +## 2. Set up the Load Balancer You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. @@ -143,7 +143,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance ::: -### 3. Set up the DNS Record +## 3. Set up the DNS Record Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. @@ -153,7 +153,7 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### 4. Set up a Private Image Registry +## 4. Set up a Private Image Registry Rancher supports air gap installs using a secure private registry. You must have your own private registry or other means of distributing container images to your machines. @@ -176,7 +176,7 @@ If you need to create a private registry, refer to the documentation pages for y ::: -### 1. Set up a Linux Node +## 1. 
Set up a Linux Node This host will be disconnected from the Internet, but needs to be able to connect to your private registry. @@ -184,7 +184,7 @@ Make sure that your node fulfills the general installation requirements for [OS, For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. -### 2. Set up a Private Docker Registry +## 2. Set up a Private Docker Registry Rancher supports air gap installs using a private registry on your bastion server. You must have your own private registry or other means of distributing container images to your machines. @@ -193,4 +193,4 @@ If you need help with creating a private registry, please refer to the [official -### [Next: Collect and Publish Images to your Private Registry](publish-images.md) +## [Next: Collect and Publish Images to your Private Registry](publish-images.md) From 99dafb5dbda22c58febfe7a2ff61264cf5d730c0 Mon Sep 17 00:00:00 2001 From: martyav Date: Mon, 16 Sep 2024 14:17:39 -0400 Subject: [PATCH 15/31] fixed install-rancher-ha --- .../install-rancher-ha.md | 17 +- .../install-rancher-ha.md | 4 +- .../install-rancher-ha.md | 251 +++++++----------- .../install-rancher-ha.md | 2 +- .../install-rancher-ha.md | 2 +- .../install-rancher-ha.md | 14 +- .../install-rancher-ha.md | 14 +- 7 files changed, 120 insertions(+), 184 deletions(-) diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md index 99c4332b6339..7aa311907312 100644 --- a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -8,7 +8,7 @@ title: 4. Install Rancher This section is about how to deploy Rancher for your air gapped environment in a high-availability Kubernetes installation. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. -### Privileged Access for Rancher +## Privileged Access for Rancher When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. @@ -92,7 +92,7 @@ Recent changes to cert-manager require an upgrade. If you are upgrading Rancher ::: -##### 1. Add the cert-manager repo +##### 1. Add the cert-manager Repo From a system connected to the internet, add the cert-manager repo to Helm: @@ -101,7 +101,7 @@ helm repo add jetstack https://charts.jetstack.io helm repo update ``` -##### 2. Fetch the cert-manager chart +##### 2. Fetch the cert-manager Chart Fetch the latest cert-manager chart available from the [Helm chart repository](https://artifacthub.io/packages/helm/cert-manager/cert-manager). @@ -109,7 +109,7 @@ Fetch the latest cert-manager chart available from the [Helm chart repository](h helm fetch jetstack/cert-manager --version v1.11.0 ``` -##### 3. Retrieve the Cert-Manager CRDs +##### 3. 
Retrieve the cert-manager CRDs Download the required CRD file for cert-manager: ```plain @@ -120,7 +120,7 @@ Download the required CRD file for cert-manager: Copy the fetched charts to a system that has access to the Rancher server cluster to complete installation. -##### 1. Install Cert-Manager +#### 1. Install cert-manager Install cert-manager with the same options you would use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. @@ -160,7 +160,8 @@ If you are using self-signed certificates, install cert-manager:
-##### 2. Install Rancher +#### 2. Install Rancher + First, refer to [Adding TLS Secrets](../../resources/add-tls-secrets.md) to publish the certificate files so Rancher and the ingress controller can use them. Then, create the namespace for Rancher using kubectl: @@ -192,9 +193,9 @@ Placeholder | Description **Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` -#### Option B: Certificates From Files using Kubernetes Secrets +#### Option B: Certificates From Files Using Kubernetes Secrets -##### 1. Create secrets +##### 1. Create Secrets Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md index f3b05323f805..43e3e044aaa7 100644 --- a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -182,7 +182,7 @@ Use `kubectl` to create namespaces and apply the rendered manifests. If you choose to use self-signed certificates in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), install cert-manager. -### For Self-Signed Certificate Installs, Install Cert-manager +### For Self-Signed Certificate Installs, Install cert-manager
Click to expand @@ -219,7 +219,7 @@ kubectl -n cattle-system apply -R -f ./rancher > **Note:** If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. -## 5. For Rancher versions before v2.3.0, Configure System Charts +## 5. For Rancher Versions Before v2.3.0, Configure System Charts If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in GitHub, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts](../../resources/local-system-charts.md). diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md index 6f3e4005c60b..7aa311907312 100644 --- a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -8,7 +8,7 @@ title: 4. Install Rancher This section is about how to deploy Rancher for your air gapped environment in a high-availability Kubernetes installation. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. -### Privileged Access for Rancher v2.5+ +## Privileged Access for Rancher When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. @@ -20,14 +20,7 @@ If you want to continue the air gapped installation using Docker commands, skip Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. -This section describes installing Rancher: - -- [1. Add the Helm Chart Repository](#1-add-the-helm-chart-repository) -- [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration) -- [3. Render the Rancher Helm Template](#3-render-the-rancher-helm-template) -- [4. Install Rancher](#4-install-rancher) - -## 1. Add the Helm Chart Repository +### 1. Add the Helm Chart Repository From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. @@ -44,9 +37,9 @@ From a system that has access to the internet, fetch the latest Helm chart and c ``` - Alpha: Experimental preview of upcoming releases. ``` - helm repo add rancher-stable https://releases.rancher.com/server-charts/stable + helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha ``` - Note: Upgrades are not supported to, from, or between Alphas. 
+ Note: Upgrades are not supported to, from, or between Alphas. 3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. ```plain @@ -58,20 +51,24 @@ From a system that has access to the internet, fetch the latest Helm chart and c helm fetch rancher-stable/rancher --version=v2.4.8 ``` -## 2. Choose your SSL Configuration +### 2. Choose your SSL Configuration Rancher Server is designed to be secure by default and requires SSL/TLS configuration. When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination). +:::note + +If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../../installation-references/helm-chart-options.md#external-tls-termination). + +::: | Configuration | Chart option | Description | Requires cert-manager | | ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | | Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** and does not need to be added when rendering the Helm template. | yes | | Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no | -## Helm Chart Options for Air Gap Installations +### Helm Chart Options for Air Gap Installations When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. @@ -81,19 +78,21 @@ When setting up the Rancher Helm template, there are several options in the Helm | `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | | `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | -## 3. Render the Rancher Helm Template +### 3. Fetch the Cert-Manager Chart Based on the choice your made in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), complete one of the procedures below. -## Option A: Default Self-Signed Certificate - +#### Option A: Default Self-Signed Certificate By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. -> **Note:** -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation](../../resources/upgrade-cert-manager.md/). +:::note + +Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation](../../resources/upgrade-cert-manager.md). + +::: -### 1. Add the cert-manager repo +##### 1. Add the cert-manager Repo From a system connected to the internet, add the cert-manager repo to Helm: @@ -102,137 +101,118 @@ helm repo add jetstack https://charts.jetstack.io helm repo update ``` -### 2. Fetch the cert-manager chart +##### 2. Fetch the cert-manager Chart -Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). +Fetch the latest cert-manager chart available from the [Helm chart repository](https://artifacthub.io/packages/helm/cert-manager/cert-manager). ```plain -helm fetch jetstack/cert-manager --version v1.5.1 +helm fetch jetstack/cert-manager --version v1.11.0 ``` -### 3. Render the cert-manager template - -Render the cert-manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - -```plain -helm template cert-manager ./cert-manager-v1.5.1.tgz --output-dir . \ - --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller \ - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector \ - --set startupapicheck.image.repository=/quay.io/jetstack/cert-manager-ctl -``` - -### 4. Download the cert-manager CRD +##### 3. 
Retrieve the cert-manager CRDs Download the required CRD file for cert-manager: ```plain - curl -L -o cert-manager-crd.yaml https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.crds.yaml + curl -L -o cert-manager-crd.yaml https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.crds.yaml ``` -### 5. Render the Rancher template - -Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. +### 4. Install Rancher +Copy the fetched charts to a system that has access to the Rancher server cluster to complete installation. -Placeholder | Description -------------|------------- -`` | The version number of the output tarball. -`` | The DNS name you pointed at your load balancer. -`` | The DNS name for your private registry. -`` | Cert-manager version running on k8s cluster. +#### 1. Install cert-manager - - +Install cert-manager with the same options you would use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. -```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` +:::note -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` +To see options on how to customize the cert-manager install (including for cases where your cluster uses PodSecurityPolicies), see the [cert-manager docs](https://artifacthub.io/packages/helm/cert-manager/cert-manager#configuration). - - +::: -```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.6` +
+ Click to expand - - +If you are using self-signed certificates, install cert-manager: +1. Create the namespace for cert-manager. + ```plain + kubectl create namespace cert-manager + ``` -## Option B: Certificates From Files using Kubernetes Secrets +2. Create the cert-manager CustomResourceDefinitions (CRDs). + ```plain + kubectl apply -f cert-manager-crd.yaml + ``` -### 1. Create secrets +3. Install cert-manager. -Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. + ```plain + helm install cert-manager ./cert-manager-v1.11.0.tgz \ + --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller \ + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector \ + --set startupapicheck.image.repository=/quay.io/jetstack/cert-manager-ctl + ``` -### 2. Render the Rancher template +
-Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. +#### 2. Install Rancher -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------- | -| `` | The version number of the output tarball. | -| `` | The DNS name you pointed at your load balancer. | -| `` | The DNS name for your private registry. | +First, refer to [Adding TLS Secrets](../../resources/add-tls-secrets.md) to publish the certificate files so Rancher and the ingress controller can use them. - - +Then, create the namespace for Rancher using kubectl: ```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts +kubectl create namespace cattle-system ``` -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: +Next, install Rancher, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + +For Kubernetes v1.25 or later, set `global.cattle.psp.enabled` to `false` when using Rancher v2.7.2-v2.7.4. This is not necessary for Rancher v2.7.5 and above, but you can still manually set the option if you choose. + +Placeholder | Description +------------|------------- +`` | The version number of the output tarball. +`` | The DNS name you pointed at your load balancer. +`` | The DNS name for your private registry. +`` | Cert-manager version running on k8s cluster. ```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated + helm install rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ + --set certmanager.version= \ --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set privateCA=true \ --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher --set useBundledSystemChart=true # Use the packaged Rancher system charts ``` -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` +**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` + +#### Option B: Certificates From Files Using Kubernetes Secrets -Then refer to [Adding TLS Secrets](../../resources/add-tls-secrets.md/) to publish the certificate files so Rancher and the ingress controller can use them. +##### 1. Create Secrets + +Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. - - +##### 2. Install Rancher +Install Rancher, declaring your chosen options. Use the reference table below to replace each placeholder. 
Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + +For Kubernetes v1.25 or later, set `global.cattle.psp.enabled` to `false` when using Rancher v2.7.2-v2.7.4. This is not necessary for Rancher v2.7.5 and above, but you can still manually set the option if you choose. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------- | +| `` | The version number of the output tarball. | +| `` | The DNS name you pointed at your load balancer. | +| `` | The DNS name for your private registry. | ```plain - helm template rancher ./rancher-.tgz --output-dir . \ + helm install rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ @@ -244,7 +224,7 @@ Then refer to [Adding TLS Secrets](../../resources/add-tls-secrets.md/) to publi If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: ```plain - helm template rancher ./rancher-.tgz --output-dir . \ + helm install rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ @@ -254,64 +234,19 @@ If you are using a Private CA signed cert, add `--set privateCA=true` following --set useBundledSystemChart=true # Use the packaged Rancher system charts ``` -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` - -Then refer to [Adding TLS Secrets](../../resources/add-tls-secrets.md/) to publish the certificate files so Rancher and the ingress controller can use them. - - - - - - -## 4. Install Rancher - -Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. - -Use `kubectl` to create namespaces and apply the rendered manifests. - -If you choose to use self-signed certificates in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), install cert-manager. - -### For Self-Signed Certificate Installs, Install Cert-manager - -
- Click to expand - -If you are using self-signed certificates, install cert-manager: - -1. Create the namespace for cert-manager. -```plain -kubectl create namespace cert-manager -``` - -1. Create the cert-manager CustomResourceDefinitions (CRDs). -```plain -kubectl apply -f cert-manager/cert-manager-crd.yaml -``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Launch cert-manager. -```plain -kubectl apply -R -f ./cert-manager -``` - -
- -### Install Rancher with kubectl -```plain -kubectl create namespace cattle-system -kubectl -n cattle-system apply -R -f ./rancher -``` The installation is complete. +:::caution + +If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. -> **Note:** If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. +::: ## Additional Resources These resources could be helpful when installing Rancher: -- [Rancher Helm chart options](../../../../reference-guides/installation-references/helm-chart-options.md/) +- [Importing and installing extensions in an air-gapped environment](../../../../integrations-in-rancher/rancher-extensions.md#importing-and-installing-extensions-in-an-air-gapped-environment) +- [Rancher Helm chart options](../../installation-references/helm-chart-options.md) - [Adding TLS secrets](../../resources/add-tls-secrets.md) -- [Troubleshooting Rancher Kubernetes Installations](../../install-upgrade-on-a-kubernetes-cluster/upgrades.md) +- [Troubleshooting Rancher Kubernetes Installations](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) diff --git a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md index bbe3d40328dc..a70da5884c0b 100644 --- a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md +++ b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -185,7 +185,7 @@ Placeholder | Description **Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` -#### Option B: Certificates From Files using Kubernetes Secrets +#### Option B: Certificates From Files Using Kubernetes Secrets ##### 1. Create secrets diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md index d09b5eca604c..8f7d7f13ef20 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -192,7 +192,7 @@ Placeholder | Description **Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` -#### Option B: Certificates From Files using Kubernetes Secrets +#### Option B: Certificates From Files Using Kubernetes Secrets ##### 1. 
Create secrets diff --git a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md index 99c4332b6339..b61343218968 100644 --- a/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md +++ b/versioned_docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -8,7 +8,7 @@ title: 4. Install Rancher This section is about how to deploy Rancher for your air gapped environment in a high-availability Kubernetes installation. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. -### Privileged Access for Rancher +## Privileged Access for Rancher When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. @@ -78,7 +78,7 @@ When setting up the Rancher Helm template, there are several options in the Helm | `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | | `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | -### 3. Fetch the Cert-Manager Chart +### 3. Fetch the cert-manager Chart Based on the choice your made in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), complete one of the procedures below. @@ -109,7 +109,7 @@ Fetch the latest cert-manager chart available from the [Helm chart repository](h helm fetch jetstack/cert-manager --version v1.11.0 ``` -##### 3. Retrieve the Cert-Manager CRDs +##### 3. Retrieve the cert-manager CRDs Download the required CRD file for cert-manager: ```plain @@ -120,7 +120,7 @@ Download the required CRD file for cert-manager: Copy the fetched charts to a system that has access to the Rancher server cluster to complete installation. -##### 1. Install Cert-Manager +#### 1. Install cert-manager Install cert-manager with the same options you would use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. @@ -160,7 +160,7 @@ If you are using self-signed certificates, install cert-manager:
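Under the self-signed option, cert-manager itself also has to be pulled from the private registry. A minimal sketch of the install, assuming the fetched chart tarball is named `cert-manager-v1.11.0.tgz` and `<REGISTRY.YOURDOMAIN.COM:PORT>` stands in for your registry (the exact image value keys can vary between cert-manager chart versions):

```plain
helm install cert-manager ./cert-manager-v1.11.0.tgz \
    --namespace cert-manager \
    --set image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-controller \
    --set webhook.image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-webhook \
    --set cainjector.image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-cainjector
```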
-##### 2. Install Rancher +#### 2. Install Rancher First, refer to [Adding TLS Secrets](../../resources/add-tls-secrets.md) to publish the certificate files so Rancher and the ingress controller can use them. Then, create the namespace for Rancher using kubectl: @@ -192,9 +192,9 @@ Placeholder | Description **Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` -#### Option B: Certificates From Files using Kubernetes Secrets +#### Option B: Certificates From Files Using Kubernetes Secrets -##### 1. Create secrets +##### 1. Create Secrets Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. diff --git a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md index 99c4332b6339..b4767e79edec 100644 --- a/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md +++ b/versioned_docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -8,7 +8,7 @@ title: 4. Install Rancher This section is about how to deploy Rancher for your air gapped environment in a high-availability Kubernetes installation. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. -### Privileged Access for Rancher +## Privileged Access for Rancher When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. @@ -92,7 +92,7 @@ Recent changes to cert-manager require an upgrade. If you are upgrading Rancher ::: -##### 1. Add the cert-manager repo +##### 1. Add the cert-manager Repo From a system connected to the internet, add the cert-manager repo to Helm: @@ -101,7 +101,7 @@ helm repo add jetstack https://charts.jetstack.io helm repo update ``` -##### 2. Fetch the cert-manager chart +##### 2. Fetch the cert-manager Chart Fetch the latest cert-manager chart available from the [Helm chart repository](https://artifacthub.io/packages/helm/cert-manager/cert-manager). @@ -109,7 +109,7 @@ Fetch the latest cert-manager chart available from the [Helm chart repository](h helm fetch jetstack/cert-manager --version v1.11.0 ``` -##### 3. Retrieve the Cert-Manager CRDs +##### 3. Retrieve the cert-manager CRDs Download the required CRD file for cert-manager: ```plain @@ -120,7 +120,7 @@ Download the required CRD file for cert-manager: Copy the fetched charts to a system that has access to the Rancher server cluster to complete installation. -##### 1. Install Cert-Manager +##### 1. Install cert-manager Install cert-manager with the same options you would use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. 
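Before the Helm install above, the CRD file retrieved earlier has to be applied, and it is worth confirming the deployment afterwards. A short sketch, assuming the CRD file was saved as `cert-manager/cert-manager-crd.yaml` and that the chart's controller deployment keeps its default name:

```plain
kubectl create namespace cert-manager
kubectl apply -f cert-manager/cert-manager-crd.yaml
# after the helm install finishes:
kubectl -n cert-manager rollout status deploy/cert-manager
```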
@@ -192,9 +192,9 @@ Placeholder | Description **Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` -#### Option B: Certificates From Files using Kubernetes Secrets +#### Option B: Certificates From Files Using Kubernetes Secrets -##### 1. Create secrets +##### 1. Create Secrets Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. From cb166412b811aa5ae9f82778a2f2168093b2f1af Mon Sep 17 00:00:00 2001 From: martyav Date: Mon, 16 Sep 2024 17:12:06 -0400 Subject: [PATCH 16/31] fixed manage-namespaces and tune-etcd-for-large-installs.md --- .../advanced-user-guides/tune-etcd-for-large-installs.md | 4 ++-- docs/how-to-guides/new-user-guides/manage-namespaces.md | 6 +++--- .../advanced-use-cases/tune-etcd-for-large-installs.md | 4 ++-- .../manage-projects/manage-namespaces.md | 6 +++--- .../advanced-use-cases/tune-etcd-for-large-installs.md | 4 ++-- .../manage-projects/manage-namespaces.md | 6 +++--- .../advanced-user-guides/tune-etcd-for-large-installs.md | 4 ++-- .../how-to-guides/new-user-guides/manage-namespaces.md | 6 +++--- .../advanced-user-guides/tune-etcd-for-large-installs.md | 4 ++-- .../how-to-guides/new-user-guides/manage-namespaces.md | 6 +++--- .../advanced-user-guides/tune-etcd-for-large-installs.md | 4 ++-- .../how-to-guides/new-user-guides/manage-namespaces.md | 6 +++--- .../advanced-user-guides/tune-etcd-for-large-installs.md | 4 ++-- .../how-to-guides/new-user-guides/manage-namespaces.md | 6 +++--- 14 files changed, 35 insertions(+), 35 deletions(-) diff --git a/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md index 60a87df09a41..27389737ab63 100644 --- a/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md +++ b/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md @@ -10,7 +10,7 @@ When Rancher is used to manage [a large infrastructure](../../getting-started/in The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.5/op-guide/maintenance/#space-quota) setting on the etcd servers. -### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB +## Example: This Snippet of the RKE Cluster.yml file Increases the Keyspace Size to 5GB ```yaml # RKE cluster.yml @@ -21,7 +21,7 @@ services: quota-backend-bytes: 5368709120 ``` -## Scaling etcd disk performance +## Scaling etcd Disk Performance You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.5/tuning/#disk) on how to tune the disk priority on the host. 
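Before raising `quota-backend-bytes`, it can help to confirm how close etcd actually is to its limit. A hedged sketch for an RKE-provisioned etcd node (the `etcd` container name and its preset certificates are assumptions about an RKE setup; adjust for your environment):

```plain
# current database size per member
docker exec etcd etcdctl endpoint status --write-out table
# a NOSPACE entry here means the keyspace quota has already been exhausted
docker exec etcd etcdctl alarm list
```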
diff --git a/docs/how-to-guides/new-user-guides/manage-namespaces.md b/docs/how-to-guides/new-user-guides/manage-namespaces.md index 48ae6879c016..31c76b649853 100644 --- a/docs/how-to-guides/new-user-guides/manage-namespaces.md +++ b/docs/how-to-guides/new-user-guides/manage-namespaces.md @@ -29,7 +29,7 @@ If you create a namespace with `kubectl`, it may be unusable because `kubectl` d ::: -### Creating Namespaces +## Creating Namespaces Create a new namespace to isolate apps and resources in a project. @@ -50,7 +50,7 @@ When working with project resources that you can assign to a namespace (i.e., [w **Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. -### Moving Namespaces to Another Project +## Moving Namespaces to Another Project Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. @@ -71,7 +71,7 @@ Cluster admins and members may occasionally need to move a namespace to another **Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attached resources from the new project. -### Editing Namespace Resource Quotas +## Editing Namespace Resource Quotas You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md index b4c26dfbfa1c..3ab3df09ffa6 100644 --- a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md @@ -6,7 +6,7 @@ When running larger Rancher installations with 15 or more clusters it is recomme The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.3/op-guide/maintenance/#space-quota) setting on the etcd servers. -### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB +## Example: This Snippet of the RKE Cluster.yml file Increases the Keyspace Size to 5GB ```yaml # RKE cluster.yml @@ -17,7 +17,7 @@ services: quota-backend-bytes: 5368709120 ``` -## Scaling etcd disk performance +## Scaling etcd Disk Performance You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.3/tuning/#disk) on how to tune the disk priority on the host. 
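If the quota has already been exhausted, raising `quota-backend-bytes` alone may not clear the `NOSPACE` alarm behind the `mvcc: database space exceeded` errors. The usual etcd recovery sequence is to compact old revisions, defragment each member, and then disarm the alarm; a rough sketch, again assuming an RKE node where `etcdctl` runs inside the `etcd` container:

```plain
# note the current "revision" value reported for the member
docker exec etcd etcdctl endpoint status --write-out json
# compact away superseded revisions, then reclaim the space on disk
docker exec etcd etcdctl compact <REVISION>
docker exec etcd etcdctl defrag
# clear the NOSPACE alarm once space is available again
docker exec etcd etcdctl alarm disarm
```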
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md index 32b7aa9dbbe6..39c00409a283 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md @@ -26,7 +26,7 @@ To manage permissions in a vanilla Kubernetes cluster, cluster admins configure > **Note:** If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](manage-namespaces.md) to ensure that you will have permission to access the namespace. -### Creating Namespaces +## Creating Namespaces Create a new namespace to isolate apps and resources in a project. @@ -44,7 +44,7 @@ Create a new namespace to isolate apps and resources in a project. **Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. -### Moving Namespaces to Another Project +## Moving Namespaces to Another Project Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. @@ -64,7 +64,7 @@ Cluster admins and members may occasionally need to move a namespace to another **Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attached resources from the new project. -### Editing Namespace Resource Quotas +## Editing Namespace Resource Quotas You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md index b4c26dfbfa1c..3ab3df09ffa6 100644 --- a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md @@ -6,7 +6,7 @@ When running larger Rancher installations with 15 or more clusters it is recomme The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.3/op-guide/maintenance/#space-quota) setting on the etcd servers. 
-### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB +## Example: This Snippet of the RKE Cluster.yml file Increases the Keyspace Size to 5GB ```yaml # RKE cluster.yml @@ -17,7 +17,7 @@ services: quota-backend-bytes: 5368709120 ``` -## Scaling etcd disk performance +## Scaling etcd Disk Performance You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.3/tuning/#disk) on how to tune the disk priority on the host. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md index cebe18d26866..e0a308e61e6d 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md @@ -25,7 +25,7 @@ To manage permissions in a vanilla Kubernetes cluster, cluster admins configure > **Note:** If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](manage-namespaces.md) to ensure that you will have permission to access the namespace. -### Creating Namespaces +## Creating Namespaces Create a new namespace to isolate apps and resources in a project. @@ -43,7 +43,7 @@ Create a new namespace to isolate apps and resources in a project. **Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. -### Moving Namespaces to Another Project +## Moving Namespaces to Another Project Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. @@ -63,7 +63,7 @@ Cluster admins and members may occasionally need to move a namespace to another **Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attached resources from the new project. -### Editing Namespace Resource Quotas +## Editing Namespace Resource Quotas You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md index 01cb40dc7d79..eb49a9945bbd 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md @@ -12,7 +12,7 @@ The etcd data set is automatically cleaned up on a five minute interval by Kuber Details about `quota-backend-bytes` differs by etcd version. For more information, see the [official etcd v3.5 documentation](https://etcd.io/docs/v3.5/op-guide/maintenance/#space-quota) if you're running Kubernetes v1.22 and later. Otherwise, see the [official etcd v3.4 documentation](https://etcd.io/docs/v3.4/op-guide/maintenance/#space-quota). 
-### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB +## Example: This Snippet of the RKE Cluster.yml file Increases the Keyspace Size to 5GB ```yaml # RKE cluster.yml @@ -23,7 +23,7 @@ services: quota-backend-bytes: 5368709120 ``` -## Scaling etcd disk performance +## Scaling etcd Disk Performance You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.5/tuning/#disk) on how to tune the disk priority on the host. diff --git a/versioned_docs/version-2.6/how-to-guides/new-user-guides/manage-namespaces.md b/versioned_docs/version-2.6/how-to-guides/new-user-guides/manage-namespaces.md index 48ae6879c016..31c76b649853 100644 --- a/versioned_docs/version-2.6/how-to-guides/new-user-guides/manage-namespaces.md +++ b/versioned_docs/version-2.6/how-to-guides/new-user-guides/manage-namespaces.md @@ -29,7 +29,7 @@ If you create a namespace with `kubectl`, it may be unusable because `kubectl` d ::: -### Creating Namespaces +## Creating Namespaces Create a new namespace to isolate apps and resources in a project. @@ -50,7 +50,7 @@ When working with project resources that you can assign to a namespace (i.e., [w **Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. -### Moving Namespaces to Another Project +## Moving Namespaces to Another Project Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. @@ -71,7 +71,7 @@ Cluster admins and members may occasionally need to move a namespace to another **Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attached resources from the new project. -### Editing Namespace Resource Quotas +## Editing Namespace Resource Quotas You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md index 60a87df09a41..27389737ab63 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md @@ -10,7 +10,7 @@ When Rancher is used to manage [a large infrastructure](../../getting-started/in The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.5/op-guide/maintenance/#space-quota) setting on the etcd servers. 
-### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB +## Example: This Snippet of the RKE Cluster.yml file Increases the Keyspace Size to 5GB ```yaml # RKE cluster.yml @@ -21,7 +21,7 @@ services: quota-backend-bytes: 5368709120 ``` -## Scaling etcd disk performance +## Scaling etcd Disk Performance You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.5/tuning/#disk) on how to tune the disk priority on the host. diff --git a/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-namespaces.md b/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-namespaces.md index 48ae6879c016..31c76b649853 100644 --- a/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-namespaces.md +++ b/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-namespaces.md @@ -29,7 +29,7 @@ If you create a namespace with `kubectl`, it may be unusable because `kubectl` d ::: -### Creating Namespaces +## Creating Namespaces Create a new namespace to isolate apps and resources in a project. @@ -50,7 +50,7 @@ When working with project resources that you can assign to a namespace (i.e., [w **Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. -### Moving Namespaces to Another Project +## Moving Namespaces to Another Project Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. @@ -71,7 +71,7 @@ Cluster admins and members may occasionally need to move a namespace to another **Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attached resources from the new project. -### Editing Namespace Resource Quotas +## Editing Namespace Resource Quotas You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. diff --git a/versioned_docs/version-2.8/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/versioned_docs/version-2.8/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md index 60a87df09a41..27389737ab63 100644 --- a/versioned_docs/version-2.8/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md +++ b/versioned_docs/version-2.8/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md @@ -10,7 +10,7 @@ When Rancher is used to manage [a large infrastructure](../../getting-started/in The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.5/op-guide/maintenance/#space-quota) setting on the etcd servers. 
-### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB +## Example: This Snippet of the RKE Cluster.yml file Increases the Keyspace Size to 5GB ```yaml # RKE cluster.yml @@ -21,7 +21,7 @@ services: quota-backend-bytes: 5368709120 ``` -## Scaling etcd disk performance +## Scaling etcd Disk Performance You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.5/tuning/#disk) on how to tune the disk priority on the host. diff --git a/versioned_docs/version-2.8/how-to-guides/new-user-guides/manage-namespaces.md b/versioned_docs/version-2.8/how-to-guides/new-user-guides/manage-namespaces.md index 48ae6879c016..31c76b649853 100644 --- a/versioned_docs/version-2.8/how-to-guides/new-user-guides/manage-namespaces.md +++ b/versioned_docs/version-2.8/how-to-guides/new-user-guides/manage-namespaces.md @@ -29,7 +29,7 @@ If you create a namespace with `kubectl`, it may be unusable because `kubectl` d ::: -### Creating Namespaces +## Creating Namespaces Create a new namespace to isolate apps and resources in a project. @@ -50,7 +50,7 @@ When working with project resources that you can assign to a namespace (i.e., [w **Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. -### Moving Namespaces to Another Project +## Moving Namespaces to Another Project Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. @@ -71,7 +71,7 @@ Cluster admins and members may occasionally need to move a namespace to another **Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attached resources from the new project. -### Editing Namespace Resource Quotas +## Editing Namespace Resource Quotas You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. diff --git a/versioned_docs/version-2.9/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/versioned_docs/version-2.9/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md index 60a87df09a41..27389737ab63 100644 --- a/versioned_docs/version-2.9/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md +++ b/versioned_docs/version-2.9/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md @@ -10,7 +10,7 @@ When Rancher is used to manage [a large infrastructure](../../getting-started/in The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.5/op-guide/maintenance/#space-quota) setting on the etcd servers. 
-### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB +## Example: This Snippet of the RKE Cluster.yml file Increases the Keyspace Size to 5GB ```yaml # RKE cluster.yml @@ -21,7 +21,7 @@ services: quota-backend-bytes: 5368709120 ``` -## Scaling etcd disk performance +## Scaling etcd Disk Performance You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.5/tuning/#disk) on how to tune the disk priority on the host. diff --git a/versioned_docs/version-2.9/how-to-guides/new-user-guides/manage-namespaces.md b/versioned_docs/version-2.9/how-to-guides/new-user-guides/manage-namespaces.md index 48ae6879c016..31c76b649853 100644 --- a/versioned_docs/version-2.9/how-to-guides/new-user-guides/manage-namespaces.md +++ b/versioned_docs/version-2.9/how-to-guides/new-user-guides/manage-namespaces.md @@ -29,7 +29,7 @@ If you create a namespace with `kubectl`, it may be unusable because `kubectl` d ::: -### Creating Namespaces +## Creating Namespaces Create a new namespace to isolate apps and resources in a project. @@ -50,7 +50,7 @@ When working with project resources that you can assign to a namespace (i.e., [w **Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. -### Moving Namespaces to Another Project +## Moving Namespaces to Another Project Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. @@ -71,7 +71,7 @@ Cluster admins and members may occasionally need to move a namespace to another **Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attached resources from the new project. -### Editing Namespace Resource Quotas +## Editing Namespace Resource Quotas You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. From 6daaed580e0d4f07d0e9c78fa7cc59bd23cee449 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 11:14:39 -0400 Subject: [PATCH 17/31] fixed cis-scans/configuration-reference.md --- .../cis-scans/configuration-reference.md | 6 +++--- .../cis-scans/configuration-reference.md | 6 +++--- .../cis-scans/configuration-reference.md | 6 +++--- .../cis-scans/configuration-reference.md | 6 +++--- .../cis-scans/configuration-reference.md | 6 +++--- .../cis-scans/configuration-reference.md | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/integrations-in-rancher/cis-scans/configuration-reference.md b/docs/integrations-in-rancher/cis-scans/configuration-reference.md index 0403956be56b..3394bc2702b9 100644 --- a/docs/integrations-in-rancher/cis-scans/configuration-reference.md +++ b/docs/integrations-in-rancher/cis-scans/configuration-reference.md @@ -14,7 +14,7 @@ To configure the custom resources, go to the **Cluster Dashboard** To configure 1. On the **Clusters** page, go to the cluster where you want to configure CIS scans and click **Explore**. 1. In the left navigation bar, click **CIS Benchmark**. -### Scans +## Scans A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed. 
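A scan can also be created declaratively and applied with `kubectl`. A minimal sketch, where the object name is hypothetical and the API group is an assumption based on the CIS Benchmark operator, while the profile name mirrors the hunk below:

```yaml
apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: rke-hardened-scan        # hypothetical name
spec:
  scanProfileName: rke-profile-hardened
```

Applying it with `kubectl apply -f` and then watching `kubectl get clusterscans` should show the report being generated once the scan completes.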
@@ -31,7 +31,7 @@ spec: scanProfileName: rke-profile-hardened ``` -### Profiles +## Profiles A profile contains the configuration for the CIS scan, which includes the benchmark version to use and any specific tests to skip in that benchmark. @@ -66,7 +66,7 @@ spec: - "1.1.21" ``` -### Benchmark Versions +## Benchmark Versions A benchmark version is the name of benchmark to run using `kube-bench`, as well as the valid configuration parameters for that benchmark. diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/configuration-reference.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/configuration-reference.md index 74d88127011f..a412998b392f 100644 --- a/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/configuration-reference.md +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/configuration-reference.md @@ -10,7 +10,7 @@ This configuration reference is intended to help you manage the custom resources To configure the custom resources, go to the **Cluster Explorer** in the Rancher UI. In dropdown menu in the top left corner, click **Cluster Explorer > CIS Benchmark.** -### Scans +## Scans A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed. @@ -27,7 +27,7 @@ spec: scanProfileName: rke-profile-hardened ``` -### Profiles +## Profiles A profile contains the configuration for the CIS scan, which includes the benchmark version to use and any specific tests to skip in that benchmark. @@ -58,7 +58,7 @@ spec: - "1.1.21" ``` -### Benchmark Versions +## Benchmark Versions A benchmark version is the name of benchmark to run using `kube-bench`, as well as the valid configuration parameters for that benchmark. diff --git a/versioned_docs/version-2.6/integrations-in-rancher/cis-scans/configuration-reference.md b/versioned_docs/version-2.6/integrations-in-rancher/cis-scans/configuration-reference.md index 0403956be56b..3394bc2702b9 100644 --- a/versioned_docs/version-2.6/integrations-in-rancher/cis-scans/configuration-reference.md +++ b/versioned_docs/version-2.6/integrations-in-rancher/cis-scans/configuration-reference.md @@ -14,7 +14,7 @@ To configure the custom resources, go to the **Cluster Dashboard** To configure 1. On the **Clusters** page, go to the cluster where you want to configure CIS scans and click **Explore**. 1. In the left navigation bar, click **CIS Benchmark**. -### Scans +## Scans A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed. @@ -31,7 +31,7 @@ spec: scanProfileName: rke-profile-hardened ``` -### Profiles +## Profiles A profile contains the configuration for the CIS scan, which includes the benchmark version to use and any specific tests to skip in that benchmark. @@ -66,7 +66,7 @@ spec: - "1.1.21" ``` -### Benchmark Versions +## Benchmark Versions A benchmark version is the name of benchmark to run using `kube-bench`, as well as the valid configuration parameters for that benchmark. 
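A profile is what ties the two together: it names a benchmark version and, optionally, the tests to skip. A minimal sketch (the kind and API group are assumptions about the CIS Benchmark operator, and the benchmark name and skipped test IDs are illustrative):

```yaml
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
  name: rke-profile-hardened-skips   # hypothetical name
spec:
  benchmarkVersion: rke-cis-1.6-hardened   # assumed benchmark version name
  skipTests:
    - "1.1.20"
    - "1.1.21"
```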
diff --git a/versioned_docs/version-2.7/integrations-in-rancher/cis-scans/configuration-reference.md b/versioned_docs/version-2.7/integrations-in-rancher/cis-scans/configuration-reference.md index 0403956be56b..3394bc2702b9 100644 --- a/versioned_docs/version-2.7/integrations-in-rancher/cis-scans/configuration-reference.md +++ b/versioned_docs/version-2.7/integrations-in-rancher/cis-scans/configuration-reference.md @@ -14,7 +14,7 @@ To configure the custom resources, go to the **Cluster Dashboard** To configure 1. On the **Clusters** page, go to the cluster where you want to configure CIS scans and click **Explore**. 1. In the left navigation bar, click **CIS Benchmark**. -### Scans +## Scans A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed. @@ -31,7 +31,7 @@ spec: scanProfileName: rke-profile-hardened ``` -### Profiles +## Profiles A profile contains the configuration for the CIS scan, which includes the benchmark version to use and any specific tests to skip in that benchmark. @@ -66,7 +66,7 @@ spec: - "1.1.21" ``` -### Benchmark Versions +## Benchmark Versions A benchmark version is the name of benchmark to run using `kube-bench`, as well as the valid configuration parameters for that benchmark. diff --git a/versioned_docs/version-2.8/integrations-in-rancher/cis-scans/configuration-reference.md b/versioned_docs/version-2.8/integrations-in-rancher/cis-scans/configuration-reference.md index 0403956be56b..3394bc2702b9 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/cis-scans/configuration-reference.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/cis-scans/configuration-reference.md @@ -14,7 +14,7 @@ To configure the custom resources, go to the **Cluster Dashboard** To configure 1. On the **Clusters** page, go to the cluster where you want to configure CIS scans and click **Explore**. 1. In the left navigation bar, click **CIS Benchmark**. -### Scans +## Scans A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed. @@ -31,7 +31,7 @@ spec: scanProfileName: rke-profile-hardened ``` -### Profiles +## Profiles A profile contains the configuration for the CIS scan, which includes the benchmark version to use and any specific tests to skip in that benchmark. @@ -66,7 +66,7 @@ spec: - "1.1.21" ``` -### Benchmark Versions +## Benchmark Versions A benchmark version is the name of benchmark to run using `kube-bench`, as well as the valid configuration parameters for that benchmark. diff --git a/versioned_docs/version-2.9/integrations-in-rancher/cis-scans/configuration-reference.md b/versioned_docs/version-2.9/integrations-in-rancher/cis-scans/configuration-reference.md index 0403956be56b..3394bc2702b9 100644 --- a/versioned_docs/version-2.9/integrations-in-rancher/cis-scans/configuration-reference.md +++ b/versioned_docs/version-2.9/integrations-in-rancher/cis-scans/configuration-reference.md @@ -14,7 +14,7 @@ To configure the custom resources, go to the **Cluster Dashboard** To configure 1. On the **Clusters** page, go to the cluster where you want to configure CIS scans and click **Explore**. 1. In the left navigation bar, click **CIS Benchmark**. -### Scans +## Scans A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed. 
@@ -31,7 +31,7 @@ spec: scanProfileName: rke-profile-hardened ``` -### Profiles +## Profiles A profile contains the configuration for the CIS scan, which includes the benchmark version to use and any specific tests to skip in that benchmark. @@ -66,7 +66,7 @@ spec: - "1.1.21" ``` -### Benchmark Versions +## Benchmark Versions A benchmark version is the name of benchmark to run using `kube-bench`, as well as the valid configuration parameters for that benchmark. From f4801a4aa83daa6fc245f1cec9db093965a80606 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 11:17:54 -0400 Subject: [PATCH 18/31] fixed custom-benchmark.md --- .../integrations-in-rancher/cis-scans/custom-benchmark.md | 8 ++++---- .../integrations-in-rancher/cis-scans/custom-benchmark.md | 8 ++++---- .../integrations-in-rancher/cis-scans/custom-benchmark.md | 8 ++++---- .../integrations-in-rancher/cis-scans/custom-benchmark.md | 8 ++++---- .../integrations-in-rancher/cis-scans/custom-benchmark.md | 8 ++++---- .../integrations-in-rancher/cis-scans/custom-benchmark.md | 8 ++++---- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/docs/integrations-in-rancher/cis-scans/custom-benchmark.md b/docs/integrations-in-rancher/cis-scans/custom-benchmark.md index 47853e45c147..4ec353cc60b4 100644 --- a/docs/integrations-in-rancher/cis-scans/custom-benchmark.md +++ b/docs/integrations-in-rancher/cis-scans/custom-benchmark.md @@ -17,7 +17,7 @@ When a cluster scan is run, you need to select a Profile which points to a speci Follow all the steps below to add a custom Benchmark Version and run a scan using it. -### 1. Prepare the Custom Benchmark Version ConfigMap +## 1. Prepare the Custom Benchmark Version ConfigMap To create a custom benchmark version, first you need to create a ConfigMap containing the benchmark version's config files and upload it to your Kubernetes cluster where you want to run the scan. @@ -42,7 +42,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom kubectl create configmap -n foo --from-file= ``` -### 2. Add a Custom Benchmark Version to a Cluster +## 2. Add a Custom Benchmark Version to a Cluster 1. In the upper left corner, click **☰ > Cluster Management**. 1. On the **Clusters** page, go to the cluster where you want to add a custom benchmark and click **Explore**. @@ -54,7 +54,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom 1. Add the minimum and maximum Kubernetes version limits applicable, if any. 1. Click **Create**. -### 3. Create a New Profile for the Custom Benchmark Version +## 3. Create a New Profile for the Custom Benchmark Version To run a scan using your custom benchmark version, you need to add a new Profile pointing to this benchmark version. @@ -66,7 +66,7 @@ To run a scan using your custom benchmark version, you need to add a new Profile 1. Choose the Benchmark Version from the dropdown. 1. Click **Create**. -### 4. Run a Scan Using the Custom Benchmark Version +## 4. Run a Scan Using the Custom Benchmark Version Once the Profile pointing to your custom benchmark version `foo` has been created, you can create a new Scan to run the custom test configs in the Benchmark Version. 
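For step 1 above, assuming the custom benchmark's config files sit in a local directory named `./foo` and that the benchmark operator lives in the `cis-operator-system` namespace (both are assumptions for illustration), the ConfigMap could be created and verified like this:

```plain
kubectl create configmap foo -n cis-operator-system --from-file=./foo
# each config file should appear as a key in the ConfigMap data
kubectl get configmap foo -n cis-operator-system -o yaml
```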
diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/custom-benchmark.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/custom-benchmark.md index 212d7b8d2c79..d268f383d59e 100644 --- a/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/custom-benchmark.md +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/custom-benchmark.md @@ -19,7 +19,7 @@ When a cluster scan is run, you need to select a Profile which points to a speci Follow all the steps below to add a custom Benchmark Version and run a scan using it. -### 1. Prepare the Custom Benchmark Version ConfigMap +## 1. Prepare the Custom Benchmark Version ConfigMap To create a custom benchmark version, first you need to create a ConfigMap containing the benchmark version's config files and upload it to your Kubernetes cluster where you want to run the scan. @@ -44,7 +44,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom kubectl create configmap -n foo --from-file= ``` -### 2. Add a Custom Benchmark Version to a Cluster +## 2. Add a Custom Benchmark Version to a Cluster 1. Once the ConfigMap has been created in your cluster, navigate to the **Cluster Explorer** in the Rancher UI. 1. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** @@ -55,7 +55,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom 1. Add the minimum and maximum Kubernetes version limits applicable, if any. 1. Click **Create.** -### 3. Create a New Profile for the Custom Benchmark Version +## 3. Create a New Profile for the Custom Benchmark Version To run a scan using your custom benchmark version, you need to add a new Profile pointing to this benchmark version. @@ -66,7 +66,7 @@ To run a scan using your custom benchmark version, you need to add a new Profile 1. Choose the Benchmark Version `foo` from the dropdown. 1. Click **Create.** -### 4. Run a Scan Using the Custom Benchmark Version +## 4. Run a Scan Using the Custom Benchmark Version Once the Profile pointing to your custom benchmark version `foo` has been created, you can create a new Scan to run the custom test configs in the Benchmark Version. diff --git a/versioned_docs/version-2.6/integrations-in-rancher/cis-scans/custom-benchmark.md b/versioned_docs/version-2.6/integrations-in-rancher/cis-scans/custom-benchmark.md index 47853e45c147..4ec353cc60b4 100644 --- a/versioned_docs/version-2.6/integrations-in-rancher/cis-scans/custom-benchmark.md +++ b/versioned_docs/version-2.6/integrations-in-rancher/cis-scans/custom-benchmark.md @@ -17,7 +17,7 @@ When a cluster scan is run, you need to select a Profile which points to a speci Follow all the steps below to add a custom Benchmark Version and run a scan using it. -### 1. Prepare the Custom Benchmark Version ConfigMap +## 1. Prepare the Custom Benchmark Version ConfigMap To create a custom benchmark version, first you need to create a ConfigMap containing the benchmark version's config files and upload it to your Kubernetes cluster where you want to run the scan. @@ -42,7 +42,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom kubectl create configmap -n foo --from-file= ``` -### 2. Add a Custom Benchmark Version to a Cluster +## 2. Add a Custom Benchmark Version to a Cluster 1. In the upper left corner, click **☰ > Cluster Management**. 1. 
On the **Clusters** page, go to the cluster where you want to add a custom benchmark and click **Explore**. @@ -54,7 +54,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom 1. Add the minimum and maximum Kubernetes version limits applicable, if any. 1. Click **Create**. -### 3. Create a New Profile for the Custom Benchmark Version +## 3. Create a New Profile for the Custom Benchmark Version To run a scan using your custom benchmark version, you need to add a new Profile pointing to this benchmark version. @@ -66,7 +66,7 @@ To run a scan using your custom benchmark version, you need to add a new Profile 1. Choose the Benchmark Version from the dropdown. 1. Click **Create**. -### 4. Run a Scan Using the Custom Benchmark Version +## 4. Run a Scan Using the Custom Benchmark Version Once the Profile pointing to your custom benchmark version `foo` has been created, you can create a new Scan to run the custom test configs in the Benchmark Version. diff --git a/versioned_docs/version-2.7/integrations-in-rancher/cis-scans/custom-benchmark.md b/versioned_docs/version-2.7/integrations-in-rancher/cis-scans/custom-benchmark.md index 47853e45c147..4ec353cc60b4 100644 --- a/versioned_docs/version-2.7/integrations-in-rancher/cis-scans/custom-benchmark.md +++ b/versioned_docs/version-2.7/integrations-in-rancher/cis-scans/custom-benchmark.md @@ -17,7 +17,7 @@ When a cluster scan is run, you need to select a Profile which points to a speci Follow all the steps below to add a custom Benchmark Version and run a scan using it. -### 1. Prepare the Custom Benchmark Version ConfigMap +## 1. Prepare the Custom Benchmark Version ConfigMap To create a custom benchmark version, first you need to create a ConfigMap containing the benchmark version's config files and upload it to your Kubernetes cluster where you want to run the scan. @@ -42,7 +42,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom kubectl create configmap -n foo --from-file= ``` -### 2. Add a Custom Benchmark Version to a Cluster +## 2. Add a Custom Benchmark Version to a Cluster 1. In the upper left corner, click **☰ > Cluster Management**. 1. On the **Clusters** page, go to the cluster where you want to add a custom benchmark and click **Explore**. @@ -54,7 +54,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom 1. Add the minimum and maximum Kubernetes version limits applicable, if any. 1. Click **Create**. -### 3. Create a New Profile for the Custom Benchmark Version +## 3. Create a New Profile for the Custom Benchmark Version To run a scan using your custom benchmark version, you need to add a new Profile pointing to this benchmark version. @@ -66,7 +66,7 @@ To run a scan using your custom benchmark version, you need to add a new Profile 1. Choose the Benchmark Version from the dropdown. 1. Click **Create**. -### 4. Run a Scan Using the Custom Benchmark Version +## 4. Run a Scan Using the Custom Benchmark Version Once the Profile pointing to your custom benchmark version `foo` has been created, you can create a new Scan to run the custom test configs in the Benchmark Version. 
diff --git a/versioned_docs/version-2.8/integrations-in-rancher/cis-scans/custom-benchmark.md b/versioned_docs/version-2.8/integrations-in-rancher/cis-scans/custom-benchmark.md index 47853e45c147..4ec353cc60b4 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/cis-scans/custom-benchmark.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/cis-scans/custom-benchmark.md @@ -17,7 +17,7 @@ When a cluster scan is run, you need to select a Profile which points to a speci Follow all the steps below to add a custom Benchmark Version and run a scan using it. -### 1. Prepare the Custom Benchmark Version ConfigMap +## 1. Prepare the Custom Benchmark Version ConfigMap To create a custom benchmark version, first you need to create a ConfigMap containing the benchmark version's config files and upload it to your Kubernetes cluster where you want to run the scan. @@ -42,7 +42,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom kubectl create configmap -n foo --from-file= ``` -### 2. Add a Custom Benchmark Version to a Cluster +## 2. Add a Custom Benchmark Version to a Cluster 1. In the upper left corner, click **☰ > Cluster Management**. 1. On the **Clusters** page, go to the cluster where you want to add a custom benchmark and click **Explore**. @@ -54,7 +54,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom 1. Add the minimum and maximum Kubernetes version limits applicable, if any. 1. Click **Create**. -### 3. Create a New Profile for the Custom Benchmark Version +## 3. Create a New Profile for the Custom Benchmark Version To run a scan using your custom benchmark version, you need to add a new Profile pointing to this benchmark version. @@ -66,7 +66,7 @@ To run a scan using your custom benchmark version, you need to add a new Profile 1. Choose the Benchmark Version from the dropdown. 1. Click **Create**. -### 4. Run a Scan Using the Custom Benchmark Version +## 4. Run a Scan Using the Custom Benchmark Version Once the Profile pointing to your custom benchmark version `foo` has been created, you can create a new Scan to run the custom test configs in the Benchmark Version. diff --git a/versioned_docs/version-2.9/integrations-in-rancher/cis-scans/custom-benchmark.md b/versioned_docs/version-2.9/integrations-in-rancher/cis-scans/custom-benchmark.md index 47853e45c147..4ec353cc60b4 100644 --- a/versioned_docs/version-2.9/integrations-in-rancher/cis-scans/custom-benchmark.md +++ b/versioned_docs/version-2.9/integrations-in-rancher/cis-scans/custom-benchmark.md @@ -17,7 +17,7 @@ When a cluster scan is run, you need to select a Profile which points to a speci Follow all the steps below to add a custom Benchmark Version and run a scan using it. -### 1. Prepare the Custom Benchmark Version ConfigMap +## 1. Prepare the Custom Benchmark Version ConfigMap To create a custom benchmark version, first you need to create a ConfigMap containing the benchmark version's config files and upload it to your Kubernetes cluster where you want to run the scan. @@ -42,7 +42,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom kubectl create configmap -n foo --from-file= ``` -### 2. Add a Custom Benchmark Version to a Cluster +## 2. Add a Custom Benchmark Version to a Cluster 1. In the upper left corner, click **☰ > Cluster Management**. 1. On the **Clusters** page, go to the cluster where you want to add a custom benchmark and click **Explore**. 
@@ -54,7 +54,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom 1. Add the minimum and maximum Kubernetes version limits applicable, if any. 1. Click **Create**. -### 3. Create a New Profile for the Custom Benchmark Version +## 3. Create a New Profile for the Custom Benchmark Version To run a scan using your custom benchmark version, you need to add a new Profile pointing to this benchmark version. @@ -66,7 +66,7 @@ To run a scan using your custom benchmark version, you need to add a new Profile 1. Choose the Benchmark Version from the dropdown. 1. Click **Create**. -### 4. Run a Scan Using the Custom Benchmark Version +## 4. Run a Scan Using the Custom Benchmark Version Once the Profile pointing to your custom benchmark version `foo` has been created, you can create a new Scan to run the custom test configs in the Benchmark Version. From 2c387e545704b4cab7a7b138e86824c5506d16c5 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 11:32:00 -0400 Subject: [PATCH 19/31] fixed supportconfig.md --- .../cloud-marketplace/supportconfig.md | 6 +++--- .../cloud-marketplace/supportconfig.md | 6 +++--- .../cloud-marketplace/supportconfig.md | 6 +++--- .../cloud-marketplace/supportconfig.md | 6 +++--- .../cloud-marketplace/supportconfig.md | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/integrations-in-rancher/cloud-marketplace/supportconfig.md b/docs/integrations-in-rancher/cloud-marketplace/supportconfig.md index 6eecac1132a7..4dbda92bf194 100644 --- a/docs/integrations-in-rancher/cloud-marketplace/supportconfig.md +++ b/docs/integrations-in-rancher/cloud-marketplace/supportconfig.md @@ -1,5 +1,5 @@ --- -title: Supportconfig bundle +title: Supportconfig Bundle --- @@ -12,7 +12,7 @@ These bundles can be created through Rancher or through direct access to the clu > **Note:** Only admin users can generate/download supportconfig bundles, regardless of method. -### Accessing through Rancher +## Accessing Through Rancher First, click on the hamburger menu. Then click the `Get Support` button. @@ -24,7 +24,7 @@ In the next page, click on the `Generate Support Config` button. ![Get Support](/img/generate-support-config.png) -### Accessing without rancher +## Accessing Without Rancher First, generate a kubeconfig for the cluster that Rancher is installed on. diff --git a/versioned_docs/version-2.6/integrations-in-rancher/cloud-marketplace/supportconfig.md b/versioned_docs/version-2.6/integrations-in-rancher/cloud-marketplace/supportconfig.md index 6eecac1132a7..b477ae8714f3 100644 --- a/versioned_docs/version-2.6/integrations-in-rancher/cloud-marketplace/supportconfig.md +++ b/versioned_docs/version-2.6/integrations-in-rancher/cloud-marketplace/supportconfig.md @@ -1,5 +1,5 @@ --- -title: Supportconfig bundle +title: Supportconfig Bundle --- @@ -12,7 +12,7 @@ These bundles can be created through Rancher or through direct access to the clu > **Note:** Only admin users can generate/download supportconfig bundles, regardless of method. -### Accessing through Rancher +### Accessing Through Rancher First, click on the hamburger menu. Then click the `Get Support` button. @@ -24,7 +24,7 @@ In the next page, click on the `Generate Support Config` button. ![Get Support](/img/generate-support-config.png) -### Accessing without rancher +### Accessing Without Rancher First, generate a kubeconfig for the cluster that Rancher is installed on. 
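As a brief sketch of that first step, with a hypothetical kubeconfig path, pointing `kubectl` at the generated file and confirming it reaches the cluster running Rancher looks like this:

```plain
export KUBECONFIG=$HOME/.kube/rancher-local.yaml   # hypothetical path to the generated kubeconfig
kubectl -n cattle-system get deploy rancher        # the Rancher deployment should be listed here
```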
diff --git a/versioned_docs/version-2.7/integrations-in-rancher/cloud-marketplace/supportconfig.md b/versioned_docs/version-2.7/integrations-in-rancher/cloud-marketplace/supportconfig.md index 6eecac1132a7..4dbda92bf194 100644 --- a/versioned_docs/version-2.7/integrations-in-rancher/cloud-marketplace/supportconfig.md +++ b/versioned_docs/version-2.7/integrations-in-rancher/cloud-marketplace/supportconfig.md @@ -1,5 +1,5 @@ --- -title: Supportconfig bundle +title: Supportconfig Bundle --- @@ -12,7 +12,7 @@ These bundles can be created through Rancher or through direct access to the clu > **Note:** Only admin users can generate/download supportconfig bundles, regardless of method. -### Accessing through Rancher +## Accessing Through Rancher First, click on the hamburger menu. Then click the `Get Support` button. @@ -24,7 +24,7 @@ In the next page, click on the `Generate Support Config` button. ![Get Support](/img/generate-support-config.png) -### Accessing without rancher +## Accessing Without Rancher First, generate a kubeconfig for the cluster that Rancher is installed on. diff --git a/versioned_docs/version-2.8/integrations-in-rancher/cloud-marketplace/supportconfig.md b/versioned_docs/version-2.8/integrations-in-rancher/cloud-marketplace/supportconfig.md index 6eecac1132a7..4dbda92bf194 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/cloud-marketplace/supportconfig.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/cloud-marketplace/supportconfig.md @@ -1,5 +1,5 @@ --- -title: Supportconfig bundle +title: Supportconfig Bundle --- @@ -12,7 +12,7 @@ These bundles can be created through Rancher or through direct access to the clu > **Note:** Only admin users can generate/download supportconfig bundles, regardless of method. -### Accessing through Rancher +## Accessing Through Rancher First, click on the hamburger menu. Then click the `Get Support` button. @@ -24,7 +24,7 @@ In the next page, click on the `Generate Support Config` button. ![Get Support](/img/generate-support-config.png) -### Accessing without rancher +## Accessing Without Rancher First, generate a kubeconfig for the cluster that Rancher is installed on. diff --git a/versioned_docs/version-2.9/integrations-in-rancher/cloud-marketplace/supportconfig.md b/versioned_docs/version-2.9/integrations-in-rancher/cloud-marketplace/supportconfig.md index 6eecac1132a7..4dbda92bf194 100644 --- a/versioned_docs/version-2.9/integrations-in-rancher/cloud-marketplace/supportconfig.md +++ b/versioned_docs/version-2.9/integrations-in-rancher/cloud-marketplace/supportconfig.md @@ -1,5 +1,5 @@ --- -title: Supportconfig bundle +title: Supportconfig Bundle --- @@ -12,7 +12,7 @@ These bundles can be created through Rancher or through direct access to the clu > **Note:** Only admin users can generate/download supportconfig bundles, regardless of method. -### Accessing through Rancher +## Accessing Through Rancher First, click on the hamburger menu. Then click the `Get Support` button. @@ -24,7 +24,7 @@ In the next page, click on the `Generate Support Config` button. ![Get Support](/img/generate-support-config.png) -### Accessing without rancher +## Accessing Without Rancher First, generate a kubeconfig for the cluster that Rancher is installed on. 
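The "Accessing Without Rancher" steps in the patches above begin by generating a kubeconfig for the cluster that Rancher is installed on. As a minimal sketch (the kubeconfig path below is hypothetical, and the `auth can-i` check is only a rough approximation of the admin requirement called out in the note), access can be verified before continuing:

```bash
# Point kubectl at a kubeconfig for the local (Rancher management) cluster;
# the file path here is a placeholder for wherever your kubeconfig lives.
export KUBECONFIG="$HOME/.kube/rancher-local.yaml"

# Confirm connectivity and (roughly) admin-level access, since only admin
# users can generate or download supportconfig bundles.
kubectl get nodes
kubectl auth can-i '*' '*' --all-namespaces
```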
From 9a7255fe103044d5ae4013724a74862e418dd651 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 11:33:47 -0400 Subject: [PATCH 20/31] fixed harvester/overview.md --- docs/integrations-in-rancher/harvester/overview.md | 6 +++--- .../integrations-in-rancher/harvester/overview.md | 6 +++--- .../integrations-in-rancher/harvester/overview.md | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/integrations-in-rancher/harvester/overview.md b/docs/integrations-in-rancher/harvester/overview.md index d22afe15965e..edd54a6f5574 100644 --- a/docs/integrations-in-rancher/harvester/overview.md +++ b/docs/integrations-in-rancher/harvester/overview.md @@ -8,7 +8,7 @@ title: Overview Introduced in Rancher v2.6.1, [Harvester](https://docs.harvesterhci.io/) is an open-source hyper-converged infrastructure (HCI) software built on Kubernetes. Harvester installs on bare metal servers and provides integrated virtualization and distributed storage capabilities. Although Harvester operates using Kubernetes, it does not require users to know Kubernetes concepts, making it a more user-friendly application. -### Feature Flag +## Feature Flag The Harvester feature flag is used to manage access to the Virtualization Management (VM) page in Rancher where users can navigate directly to Harvester clusters and access the Harvester UI. The Harvester feature flag is enabled by default. Click [here](../../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md) for more information on feature flags in Rancher. @@ -22,7 +22,7 @@ To navigate to the Harvester cluster, click **☰ > Virtualization Management**. * Users may import a Harvester cluster only on the Virtualization Management page. Importing a cluster on the Cluster Management page is not supported, and a warning will advise you to return to the VM page to do so. -### Harvester Node Driver +## Harvester Node Driver The [Harvester node driver](https://docs.harvesterhci.io/v1.1/rancher/node/node-driver/) is generally available for RKE and RKE2 options in Rancher. The node driver is available whether or not the Harvester feature flag is enabled. Note that the node driver is off by default. Users may create RKE or RKE2 clusters on Harvester only from the Cluster Management page. @@ -30,7 +30,7 @@ Harvester allows `.ISO` images to be uploaded and displayed through the Harveste See [Provisioning Drivers](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md#node-drivers) for more information on node drivers in Rancher. -### Port Requirements +## Port Requirements The port requirements for the Harvester cluster can be found [here](https://docs.harvesterhci.io/v1.1/install/requirements#networking). diff --git a/versioned_docs/version-2.8/integrations-in-rancher/harvester/overview.md b/versioned_docs/version-2.8/integrations-in-rancher/harvester/overview.md index d22afe15965e..edd54a6f5574 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/harvester/overview.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/harvester/overview.md @@ -8,7 +8,7 @@ title: Overview Introduced in Rancher v2.6.1, [Harvester](https://docs.harvesterhci.io/) is an open-source hyper-converged infrastructure (HCI) software built on Kubernetes. Harvester installs on bare metal servers and provides integrated virtualization and distributed storage capabilities. 
Although Harvester operates using Kubernetes, it does not require users to know Kubernetes concepts, making it a more user-friendly application. -### Feature Flag +## Feature Flag The Harvester feature flag is used to manage access to the Virtualization Management (VM) page in Rancher where users can navigate directly to Harvester clusters and access the Harvester UI. The Harvester feature flag is enabled by default. Click [here](../../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md) for more information on feature flags in Rancher. @@ -22,7 +22,7 @@ To navigate to the Harvester cluster, click **☰ > Virtualization Management**. * Users may import a Harvester cluster only on the Virtualization Management page. Importing a cluster on the Cluster Management page is not supported, and a warning will advise you to return to the VM page to do so. -### Harvester Node Driver +## Harvester Node Driver The [Harvester node driver](https://docs.harvesterhci.io/v1.1/rancher/node/node-driver/) is generally available for RKE and RKE2 options in Rancher. The node driver is available whether or not the Harvester feature flag is enabled. Note that the node driver is off by default. Users may create RKE or RKE2 clusters on Harvester only from the Cluster Management page. @@ -30,7 +30,7 @@ Harvester allows `.ISO` images to be uploaded and displayed through the Harveste See [Provisioning Drivers](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md#node-drivers) for more information on node drivers in Rancher. -### Port Requirements +## Port Requirements The port requirements for the Harvester cluster can be found [here](https://docs.harvesterhci.io/v1.1/install/requirements#networking). diff --git a/versioned_docs/version-2.9/integrations-in-rancher/harvester/overview.md b/versioned_docs/version-2.9/integrations-in-rancher/harvester/overview.md index d22afe15965e..edd54a6f5574 100644 --- a/versioned_docs/version-2.9/integrations-in-rancher/harvester/overview.md +++ b/versioned_docs/version-2.9/integrations-in-rancher/harvester/overview.md @@ -8,7 +8,7 @@ title: Overview Introduced in Rancher v2.6.1, [Harvester](https://docs.harvesterhci.io/) is an open-source hyper-converged infrastructure (HCI) software built on Kubernetes. Harvester installs on bare metal servers and provides integrated virtualization and distributed storage capabilities. Although Harvester operates using Kubernetes, it does not require users to know Kubernetes concepts, making it a more user-friendly application. -### Feature Flag +## Feature Flag The Harvester feature flag is used to manage access to the Virtualization Management (VM) page in Rancher where users can navigate directly to Harvester clusters and access the Harvester UI. The Harvester feature flag is enabled by default. Click [here](../../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md) for more information on feature flags in Rancher. @@ -22,7 +22,7 @@ To navigate to the Harvester cluster, click **☰ > Virtualization Management**. * Users may import a Harvester cluster only on the Virtualization Management page. Importing a cluster on the Cluster Management page is not supported, and a warning will advise you to return to the VM page to do so. 
-### Harvester Node Driver +## Harvester Node Driver The [Harvester node driver](https://docs.harvesterhci.io/v1.1/rancher/node/node-driver/) is generally available for RKE and RKE2 options in Rancher. The node driver is available whether or not the Harvester feature flag is enabled. Note that the node driver is off by default. Users may create RKE or RKE2 clusters on Harvester only from the Cluster Management page. @@ -30,7 +30,7 @@ Harvester allows `.ISO` images to be uploaded and displayed through the Harveste See [Provisioning Drivers](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md#node-drivers) for more information on node drivers in Rancher. -### Port Requirements +## Port Requirements The port requirements for the Harvester cluster can be found [here](https://docs.harvesterhci.io/v1.1/install/requirements#networking). From 5877a08c9eb1bf33a5291e1069cc3428e3af2bc9 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 11:35:45 -0400 Subject: [PATCH 21/31] fixed logging-architecture.md --- docs/integrations-in-rancher/logging/logging-architecture.md | 2 +- .../integrations-in-rancher/logging/logging-architecture.md | 2 +- .../integrations-in-rancher/logging/logging-architecture.md | 2 +- .../integrations-in-rancher/logging/logging-architecture.md | 2 +- .../integrations-in-rancher/logging/logging-architecture.md | 2 +- .../integrations-in-rancher/logging/logging-architecture.md | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/integrations-in-rancher/logging/logging-architecture.md b/docs/integrations-in-rancher/logging/logging-architecture.md index f4b716a6c2e0..ec56b8d1ef64 100644 --- a/docs/integrations-in-rancher/logging/logging-architecture.md +++ b/docs/integrations-in-rancher/logging/logging-architecture.md @@ -10,7 +10,7 @@ This section summarizes the architecture of the Rancher logging application. For more details about how the Logging operator works, see the [official documentation.](https://kube-logging.github.io/docs/#architecture) -### How the Logging Operator Works +## How the Logging Operator Works The Logging operator automates the deployment and configuration of a Kubernetes logging pipeline. It deploys and configures a Fluent Bit DaemonSet on every node to collect container and application logs from the node file system. diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-architecture.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-architecture.md index 2cb176749d47..15f99ac17713 100644 --- a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-architecture.md +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-architecture.md @@ -21,7 +21,7 @@ The following changes were introduced to logging in Rancher v2.5: - We now support writing logs to multiple `Outputs`. - We now always collect Control Plane and etcd logs. -### How the Logging Operator Works +## How the Logging Operator Works The Logging operator automates the deployment and configuration of a Kubernetes logging pipeline. It deploys and configures a Fluent Bit DaemonSet on every node to collect container and application logs from the node file system. 
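The logging architecture described above (a Fluent Bit DaemonSet on every node feeding a Fluentd aggregator, both managed by the Logging operator) can be inspected directly once `rancher-logging` is installed. A quick sketch, assuming the default `cattle-logging-system` namespace:

```bash
# Fluent Bit runs as a DaemonSet (one collector pod per node), while
# Fluentd runs as a StatefulSet that aggregates and forwards the logs.
kubectl -n cattle-logging-system get daemonsets
kubectl -n cattle-logging-system get statefulsets
kubectl -n cattle-logging-system get pods -o wide
```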
diff --git a/versioned_docs/version-2.6/integrations-in-rancher/logging/logging-architecture.md b/versioned_docs/version-2.6/integrations-in-rancher/logging/logging-architecture.md index 328418b47986..f79b15d8e554 100644 --- a/versioned_docs/version-2.6/integrations-in-rancher/logging/logging-architecture.md +++ b/versioned_docs/version-2.6/integrations-in-rancher/logging/logging-architecture.md @@ -10,7 +10,7 @@ This section summarizes the architecture of the Rancher logging application. For more details about how the Logging operator works, see the [official documentation.](https://kube-logging.github.io/docs/#architecture) -### How the Logging Operator Works +## How the Logging Operator Works The Logging operator automates the deployment and configuration of a Kubernetes logging pipeline. It deploys and configures a Fluent Bit DaemonSet on every node to collect container and application logs from the node file system. diff --git a/versioned_docs/version-2.7/integrations-in-rancher/logging/logging-architecture.md b/versioned_docs/version-2.7/integrations-in-rancher/logging/logging-architecture.md index f4b716a6c2e0..ec56b8d1ef64 100644 --- a/versioned_docs/version-2.7/integrations-in-rancher/logging/logging-architecture.md +++ b/versioned_docs/version-2.7/integrations-in-rancher/logging/logging-architecture.md @@ -10,7 +10,7 @@ This section summarizes the architecture of the Rancher logging application. For more details about how the Logging operator works, see the [official documentation.](https://kube-logging.github.io/docs/#architecture) -### How the Logging Operator Works +## How the Logging Operator Works The Logging operator automates the deployment and configuration of a Kubernetes logging pipeline. It deploys and configures a Fluent Bit DaemonSet on every node to collect container and application logs from the node file system. diff --git a/versioned_docs/version-2.8/integrations-in-rancher/logging/logging-architecture.md b/versioned_docs/version-2.8/integrations-in-rancher/logging/logging-architecture.md index f4b716a6c2e0..ec56b8d1ef64 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/logging/logging-architecture.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/logging/logging-architecture.md @@ -10,7 +10,7 @@ This section summarizes the architecture of the Rancher logging application. For more details about how the Logging operator works, see the [official documentation.](https://kube-logging.github.io/docs/#architecture) -### How the Logging Operator Works +## How the Logging Operator Works The Logging operator automates the deployment and configuration of a Kubernetes logging pipeline. It deploys and configures a Fluent Bit DaemonSet on every node to collect container and application logs from the node file system. diff --git a/versioned_docs/version-2.9/integrations-in-rancher/logging/logging-architecture.md b/versioned_docs/version-2.9/integrations-in-rancher/logging/logging-architecture.md index f4b716a6c2e0..ec56b8d1ef64 100644 --- a/versioned_docs/version-2.9/integrations-in-rancher/logging/logging-architecture.md +++ b/versioned_docs/version-2.9/integrations-in-rancher/logging/logging-architecture.md @@ -10,7 +10,7 @@ This section summarizes the architecture of the Rancher logging application. 
For more details about how the Logging operator works, see the [official documentation.](https://kube-logging.github.io/docs/#architecture) -### How the Logging Operator Works +## How the Logging Operator Works The Logging operator automates the deployment and configuration of a Kubernetes logging pipeline. It deploys and configures a Fluent Bit DaemonSet on every node to collect container and application logs from the node file system. From f68992f9f28417c3624c631d8e9b73be9a536b20 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 11:40:17 -0400 Subject: [PATCH 22/31] fixed logging-helm-chart-options.md + rm'd unnecessary annotation title --- .../logging/logging-helm-chart-options.md | 14 +++++++------- .../logging/logging-helm-chart-options.md | 10 +++++----- .../logging/logging-helm-chart-options.md | 14 +++++++------- .../logging/logging-helm-chart-options.md | 12 ++++++------ .../logging/logging-helm-chart-options.md | 14 +++++++------- .../logging/logging-helm-chart-options.md | 14 +++++++------- 6 files changed, 39 insertions(+), 39 deletions(-) diff --git a/docs/integrations-in-rancher/logging/logging-helm-chart-options.md b/docs/integrations-in-rancher/logging/logging-helm-chart-options.md index d68865a3afce..40a2797b34b8 100644 --- a/docs/integrations-in-rancher/logging/logging-helm-chart-options.md +++ b/docs/integrations-in-rancher/logging/logging-helm-chart-options.md @@ -6,7 +6,7 @@ title: rancher-logging Helm Chart Options -### Enable/Disable Windows Node Logging +## Enable/Disable Windows Node Logging You can enable or disable Windows node logging by setting `global.cattle.windows.enabled` to either `true` or `false` in the `values.yaml`. @@ -21,7 +21,7 @@ Currently an [issue](https://github.com/rancher/rancher/issues/32325) exists whe ::: -### Working with a Custom Docker Root Directory +## Working with a Custom Docker Root Directory If using a custom Docker root directory, you can set `global.dockerRootDirectory` in `values.yaml`. @@ -31,11 +31,11 @@ Note that this only affects Linux nodes. If there are any Windows nodes in the cluster, the change will not be applicable to those nodes. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints You can add your own `nodeSelector` settings and add `tolerations` for additional taints by editing the logging Helm chart values. For details, see [this page.](taints-and-tolerations.md) -### Enabling the Logging Application to Work with SELinux +## Enabling the Logging Application to Work with SELinux :::note Requirements: @@ -49,7 +49,7 @@ To use Logging v2 with SELinux, we recommend installing the `rancher-selinux` RP Then, when installing the logging application, configure the chart to be SELinux aware by changing `global.seLinux.enabled` to `true` in the `values.yaml`. -### Additional Logging Sources +## Additional Logging Sources By default, Rancher collects logs for [control plane components](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components) and [node components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for all cluster types. @@ -72,7 +72,7 @@ When enabled, Rancher collects all additional node and control plane logs the pr If you're already using a cloud provider's own logging solution such as AWS CloudWatch or Google Cloud operations suite (formerly Stackdriver), it is not necessary to enable this option as the native solution will have unrestricted access to all logs. 
-### Systemd Configuration +## Systemd Configuration In Rancher logging, `SystemdLogPath` must be configured for K3s and RKE2 Kubernetes distributions. @@ -87,7 +87,7 @@ K3s and RKE2 Kubernetes distributions log to journald, which is the subsystem of * If `/var/log/journal` exists, then use `/var/log/journal`. * If `/var/log/journal` does not exist, then use `/run/log/journal`. -:::note Notes: +:::note If any value not described above is returned, Rancher Logging will not be able to collect control plane logs. To address this issue, you will need to perform the following actions on every control plane node: diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-helm-chart-options.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-helm-chart-options.md index 817b1ddb9d49..20fc216dbe2d 100644 --- a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-helm-chart-options.md +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-helm-chart-options.md @@ -6,7 +6,7 @@ title: rancher-logging Helm Chart Options -### Enable/Disable Windows Node Logging +## Enable/Disable Windows Node Logging _Available as of v2.5.8_ @@ -19,7 +19,7 @@ When disabled, logs will still be collected from Linux nodes within the Windows > Note: Currently an [issue](https://github.com/rancher/rancher/issues/32325) exists where Windows nodeAgents are not deleted when performing a `helm upgrade` after disabling Windows logging in a Windows cluster. In this scenario, users may need to manually remove the Windows nodeAgents if they are already installed. -### Working with a Custom Docker Root Directory +## Working with a Custom Docker Root Directory _Applies to v2.5.6+_ @@ -31,11 +31,11 @@ Note that this only affects Linux nodes. If there are any Windows nodes in the cluster, the change will not be applicable to those nodes. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints You can add your own `nodeSelector` settings and add `tolerations` for additional taints by editing the logging Helm chart values. For details, see [this page.](taints-and-tolerations.md) -### Enabling the Logging Application to Work with SELinux +## Enabling the Logging Application to Work with SELinux _Available as of v2.5.8_ @@ -47,7 +47,7 @@ To use Logging v2 with SELinux, we recommend installing the `rancher-selinux` RP Then, when installing the logging application, configure the chart to be SELinux aware by changing `global.seLinux.enabled` to `true` in the `values.yaml`. -### Additional Logging Sources +## Additional Logging Sources By default, Rancher collects logs for [control plane components](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components) and [node components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for all cluster types. 
diff --git a/versioned_docs/version-2.6/integrations-in-rancher/logging/logging-helm-chart-options.md b/versioned_docs/version-2.6/integrations-in-rancher/logging/logging-helm-chart-options.md index ea93d80118fa..5148cf3c8bb9 100644 --- a/versioned_docs/version-2.6/integrations-in-rancher/logging/logging-helm-chart-options.md +++ b/versioned_docs/version-2.6/integrations-in-rancher/logging/logging-helm-chart-options.md @@ -6,7 +6,7 @@ title: rancher-logging Helm Chart Options -### Enable/Disable Windows Node Logging +## Enable/Disable Windows Node Logging You can enable or disable Windows node logging by setting `global.cattle.windows.enabled` to either `true` or `false` in the `values.yaml`. @@ -21,7 +21,7 @@ Currently an [issue](https://github.com/rancher/rancher/issues/32325) exists whe ::: -### Working with a Custom Docker Root Directory +## Working with a Custom Docker Root Directory If using a custom Docker root directory, you can set `global.dockerRootDirectory` in `values.yaml`. @@ -31,11 +31,11 @@ Note that this only affects Linux nodes. If there are any Windows nodes in the cluster, the change will not be applicable to those nodes. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints You can add your own `nodeSelector` settings and add `tolerations` for additional taints by editing the logging Helm chart values. For details, see [this page.](taints-and-tolerations.md) -### Enabling the Logging Application to Work with SELinux +## Enabling the Logging Application to Work with SELinux :::note Requirements: @@ -49,7 +49,7 @@ To use Logging v2 with SELinux, we recommend installing the `rancher-selinux` RP Then, when installing the logging application, configure the chart to be SELinux aware by changing `global.seLinux.enabled` to `true` in the `values.yaml`. -### Additional Logging Sources +## Additional Logging Sources By default, Rancher collects logs for [control plane components](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components) and [node components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for all cluster types. @@ -72,7 +72,7 @@ When enabled, Rancher collects all additional node and control plane logs the pr If you're already using a cloud provider's own logging solution such as AWS CloudWatch or Google Cloud operations suite (formerly Stackdriver), it is not necessary to enable this option as the native solution will have unrestricted access to all logs. -### Systemd Configuration +## Systemd Configuration In Rancher logging, `SystemdLogPath` must be configured for K3s and RKE2 Kubernetes distributions. @@ -87,7 +87,7 @@ K3s and RKE2 Kubernetes distributions log to journald, which is the subsystem of * If `/var/log/journal` exists, then use `/var/log/journal`. * If `/var/log/journal` does not exist, then use `/run/log/journal`. -:::note Notes: +:::note If any value not described above is returned, Rancher Logging will not be able to collect control plane logs. 
To address this issue, you will need to perform the following actions on every control plane node: diff --git a/versioned_docs/version-2.7/integrations-in-rancher/logging/logging-helm-chart-options.md b/versioned_docs/version-2.7/integrations-in-rancher/logging/logging-helm-chart-options.md index d68865a3afce..ea3802d84d54 100644 --- a/versioned_docs/version-2.7/integrations-in-rancher/logging/logging-helm-chart-options.md +++ b/versioned_docs/version-2.7/integrations-in-rancher/logging/logging-helm-chart-options.md @@ -6,7 +6,7 @@ title: rancher-logging Helm Chart Options -### Enable/Disable Windows Node Logging +## Enable/Disable Windows Node Logging You can enable or disable Windows node logging by setting `global.cattle.windows.enabled` to either `true` or `false` in the `values.yaml`. @@ -21,7 +21,7 @@ Currently an [issue](https://github.com/rancher/rancher/issues/32325) exists whe ::: -### Working with a Custom Docker Root Directory +## Working with a Custom Docker Root Directory If using a custom Docker root directory, you can set `global.dockerRootDirectory` in `values.yaml`. @@ -31,11 +31,11 @@ Note that this only affects Linux nodes. If there are any Windows nodes in the cluster, the change will not be applicable to those nodes. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints You can add your own `nodeSelector` settings and add `tolerations` for additional taints by editing the logging Helm chart values. For details, see [this page.](taints-and-tolerations.md) -### Enabling the Logging Application to Work with SELinux +## Enabling the Logging Application to Work with SELinux :::note Requirements: @@ -49,7 +49,7 @@ To use Logging v2 with SELinux, we recommend installing the `rancher-selinux` RP Then, when installing the logging application, configure the chart to be SELinux aware by changing `global.seLinux.enabled` to `true` in the `values.yaml`. -### Additional Logging Sources +## Additional Logging Sources By default, Rancher collects logs for [control plane components](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components) and [node components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for all cluster types. @@ -72,7 +72,7 @@ When enabled, Rancher collects all additional node and control plane logs the pr If you're already using a cloud provider's own logging solution such as AWS CloudWatch or Google Cloud operations suite (formerly Stackdriver), it is not necessary to enable this option as the native solution will have unrestricted access to all logs. -### Systemd Configuration +## Systemd Configuration In Rancher logging, `SystemdLogPath` must be configured for K3s and RKE2 Kubernetes distributions. diff --git a/versioned_docs/version-2.8/integrations-in-rancher/logging/logging-helm-chart-options.md b/versioned_docs/version-2.8/integrations-in-rancher/logging/logging-helm-chart-options.md index d68865a3afce..40a2797b34b8 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/logging/logging-helm-chart-options.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/logging/logging-helm-chart-options.md @@ -6,7 +6,7 @@ title: rancher-logging Helm Chart Options -### Enable/Disable Windows Node Logging +## Enable/Disable Windows Node Logging You can enable or disable Windows node logging by setting `global.cattle.windows.enabled` to either `true` or `false` in the `values.yaml`. 
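As an illustrative sketch of the `global.cattle.windows.enabled` setting referenced above (the chart and repository names are assumptions; most users toggle this through the Rancher UI instead):

```bash
# Disable Windows node log collection on an existing rancher-logging install.
# Assumes the Rancher chart repository is configured locally as "rancher-charts".
helm upgrade rancher-logging rancher-charts/rancher-logging \
  -n cattle-logging-system \
  --reuse-values \
  --set global.cattle.windows.enabled=false
```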
@@ -21,7 +21,7 @@ Currently an [issue](https://github.com/rancher/rancher/issues/32325) exists whe ::: -### Working with a Custom Docker Root Directory +## Working with a Custom Docker Root Directory If using a custom Docker root directory, you can set `global.dockerRootDirectory` in `values.yaml`. @@ -31,11 +31,11 @@ Note that this only affects Linux nodes. If there are any Windows nodes in the cluster, the change will not be applicable to those nodes. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints You can add your own `nodeSelector` settings and add `tolerations` for additional taints by editing the logging Helm chart values. For details, see [this page.](taints-and-tolerations.md) -### Enabling the Logging Application to Work with SELinux +## Enabling the Logging Application to Work with SELinux :::note Requirements: @@ -49,7 +49,7 @@ To use Logging v2 with SELinux, we recommend installing the `rancher-selinux` RP Then, when installing the logging application, configure the chart to be SELinux aware by changing `global.seLinux.enabled` to `true` in the `values.yaml`. -### Additional Logging Sources +## Additional Logging Sources By default, Rancher collects logs for [control plane components](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components) and [node components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for all cluster types. @@ -72,7 +72,7 @@ When enabled, Rancher collects all additional node and control plane logs the pr If you're already using a cloud provider's own logging solution such as AWS CloudWatch or Google Cloud operations suite (formerly Stackdriver), it is not necessary to enable this option as the native solution will have unrestricted access to all logs. -### Systemd Configuration +## Systemd Configuration In Rancher logging, `SystemdLogPath` must be configured for K3s and RKE2 Kubernetes distributions. @@ -87,7 +87,7 @@ K3s and RKE2 Kubernetes distributions log to journald, which is the subsystem of * If `/var/log/journal` exists, then use `/var/log/journal`. * If `/var/log/journal` does not exist, then use `/run/log/journal`. -:::note Notes: +:::note If any value not described above is returned, Rancher Logging will not be able to collect control plane logs. To address this issue, you will need to perform the following actions on every control plane node: diff --git a/versioned_docs/version-2.9/integrations-in-rancher/logging/logging-helm-chart-options.md b/versioned_docs/version-2.9/integrations-in-rancher/logging/logging-helm-chart-options.md index d68865a3afce..40a2797b34b8 100644 --- a/versioned_docs/version-2.9/integrations-in-rancher/logging/logging-helm-chart-options.md +++ b/versioned_docs/version-2.9/integrations-in-rancher/logging/logging-helm-chart-options.md @@ -6,7 +6,7 @@ title: rancher-logging Helm Chart Options -### Enable/Disable Windows Node Logging +## Enable/Disable Windows Node Logging You can enable or disable Windows node logging by setting `global.cattle.windows.enabled` to either `true` or `false` in the `values.yaml`. @@ -21,7 +21,7 @@ Currently an [issue](https://github.com/rancher/rancher/issues/32325) exists whe ::: -### Working with a Custom Docker Root Directory +## Working with a Custom Docker Root Directory If using a custom Docker root directory, you can set `global.dockerRootDirectory` in `values.yaml`. @@ -31,11 +31,11 @@ Note that this only affects Linux nodes. 
If there are any Windows nodes in the cluster, the change will not be applicable to those nodes. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints You can add your own `nodeSelector` settings and add `tolerations` for additional taints by editing the logging Helm chart values. For details, see [this page.](taints-and-tolerations.md) -### Enabling the Logging Application to Work with SELinux +## Enabling the Logging Application to Work with SELinux :::note Requirements: @@ -49,7 +49,7 @@ To use Logging v2 with SELinux, we recommend installing the `rancher-selinux` RP Then, when installing the logging application, configure the chart to be SELinux aware by changing `global.seLinux.enabled` to `true` in the `values.yaml`. -### Additional Logging Sources +## Additional Logging Sources By default, Rancher collects logs for [control plane components](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components) and [node components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for all cluster types. @@ -72,7 +72,7 @@ When enabled, Rancher collects all additional node and control plane logs the pr If you're already using a cloud provider's own logging solution such as AWS CloudWatch or Google Cloud operations suite (formerly Stackdriver), it is not necessary to enable this option as the native solution will have unrestricted access to all logs. -### Systemd Configuration +## Systemd Configuration In Rancher logging, `SystemdLogPath` must be configured for K3s and RKE2 Kubernetes distributions. @@ -87,7 +87,7 @@ K3s and RKE2 Kubernetes distributions log to journald, which is the subsystem of * If `/var/log/journal` exists, then use `/var/log/journal`. * If `/var/log/journal` does not exist, then use `/run/log/journal`. -:::note Notes: +:::note If any value not described above is returned, Rancher Logging will not be able to collect control plane logs. To address this issue, you will need to perform the following actions on every control plane node: From 58b77dc0d71b58de571ccf96b63378125d3597e9 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 11:52:42 -0400 Subject: [PATCH 23/31] fixed taints-and-tolerances.md --- .../integrations-in-rancher/logging/taints-and-tolerations.md | 4 ++-- .../integrations-in-rancher/logging/taints-and-tolerations.md | 4 ++-- .../integrations-in-rancher/logging/taints-and-tolerations.md | 4 ++-- .../integrations-in-rancher/logging/taints-and-tolerations.md | 4 ++-- .../integrations-in-rancher/logging/taints-and-tolerations.md | 4 ++-- .../integrations-in-rancher/logging/taints-and-tolerations.md | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/integrations-in-rancher/logging/taints-and-tolerations.md b/docs/integrations-in-rancher/logging/taints-and-tolerations.md index 327cf554fdaa..0147598e84cb 100644 --- a/docs/integrations-in-rancher/logging/taints-and-tolerations.md +++ b/docs/integrations-in-rancher/logging/taints-and-tolerations.md @@ -20,7 +20,7 @@ Both provide choice for the what node(s) the pod will run on. - [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) -### Default Implementation in Rancher's Logging Stack +## Default Implementation in Rancher's Logging Stack By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes. 
The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes. @@ -47,7 +47,7 @@ In the above example, we ensure that our pod only runs on Linux nodes, and we ad You can do the same with Rancher's existing taints, or with your own custom ones. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints If you would like to add your own `nodeSelector` settings, or if you would like to add `tolerations` for additional taints, you can pass the following to the chart's values. diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/taints-and-tolerations.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/taints-and-tolerations.md index 89633fa68d66..c6a4e0c2711e 100644 --- a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/taints-and-tolerations.md +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/taints-and-tolerations.md @@ -20,7 +20,7 @@ Both provide choice for the what node(s) the pod will run on. - [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) -### Default Implementation in Rancher's Logging Stack +## Default Implementation in Rancher's Logging Stack @@ -61,7 +61,7 @@ In the above example, we ensure that our pod only runs on Linux nodes, and we ad You can do the same with Rancher's existing taints, or with your own custom ones. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints If you would like to add your own `nodeSelector` settings, or if you would like to add `tolerations` for additional taints, you can pass the following to the chart's values. diff --git a/versioned_docs/version-2.6/integrations-in-rancher/logging/taints-and-tolerations.md b/versioned_docs/version-2.6/integrations-in-rancher/logging/taints-and-tolerations.md index 327cf554fdaa..0147598e84cb 100644 --- a/versioned_docs/version-2.6/integrations-in-rancher/logging/taints-and-tolerations.md +++ b/versioned_docs/version-2.6/integrations-in-rancher/logging/taints-and-tolerations.md @@ -20,7 +20,7 @@ Both provide choice for the what node(s) the pod will run on. - [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) -### Default Implementation in Rancher's Logging Stack +## Default Implementation in Rancher's Logging Stack By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes. The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes. @@ -47,7 +47,7 @@ In the above example, we ensure that our pod only runs on Linux nodes, and we ad You can do the same with Rancher's existing taints, or with your own custom ones. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints If you would like to add your own `nodeSelector` settings, or if you would like to add `tolerations` for additional taints, you can pass the following to the chart's values. 
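The sentence above introduces a values snippet that the diff context does not include. A hypothetical sketch of such a values file follows; the key names and the custom taint are illustrative only and may differ between chart versions:

```bash
# Hypothetical custom values for the rancher-logging chart: tolerate an
# additional taint and pin the logging pods to Linux nodes.
cat <<'EOF' > logging-values.yaml
tolerations:
  - key: "example.com/dedicated"   # hypothetical custom taint
    operator: "Exists"
    effect: "NoSchedule"
nodeSelector:
  kubernetes.io/os: linux
EOF

helm upgrade --install rancher-logging rancher-charts/rancher-logging \
  -n cattle-logging-system -f logging-values.yaml
```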
diff --git a/versioned_docs/version-2.7/integrations-in-rancher/logging/taints-and-tolerations.md b/versioned_docs/version-2.7/integrations-in-rancher/logging/taints-and-tolerations.md index 327cf554fdaa..0147598e84cb 100644 --- a/versioned_docs/version-2.7/integrations-in-rancher/logging/taints-and-tolerations.md +++ b/versioned_docs/version-2.7/integrations-in-rancher/logging/taints-and-tolerations.md @@ -20,7 +20,7 @@ Both provide choice for the what node(s) the pod will run on. - [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) -### Default Implementation in Rancher's Logging Stack +## Default Implementation in Rancher's Logging Stack By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes. The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes. @@ -47,7 +47,7 @@ In the above example, we ensure that our pod only runs on Linux nodes, and we ad You can do the same with Rancher's existing taints, or with your own custom ones. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints If you would like to add your own `nodeSelector` settings, or if you would like to add `tolerations` for additional taints, you can pass the following to the chart's values. diff --git a/versioned_docs/version-2.8/integrations-in-rancher/logging/taints-and-tolerations.md b/versioned_docs/version-2.8/integrations-in-rancher/logging/taints-and-tolerations.md index 327cf554fdaa..0147598e84cb 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/logging/taints-and-tolerations.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/logging/taints-and-tolerations.md @@ -20,7 +20,7 @@ Both provide choice for the what node(s) the pod will run on. - [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) -### Default Implementation in Rancher's Logging Stack +## Default Implementation in Rancher's Logging Stack By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes. The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes. @@ -47,7 +47,7 @@ In the above example, we ensure that our pod only runs on Linux nodes, and we ad You can do the same with Rancher's existing taints, or with your own custom ones. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints If you would like to add your own `nodeSelector` settings, or if you would like to add `tolerations` for additional taints, you can pass the following to the chart's values. diff --git a/versioned_docs/version-2.9/integrations-in-rancher/logging/taints-and-tolerations.md b/versioned_docs/version-2.9/integrations-in-rancher/logging/taints-and-tolerations.md index 327cf554fdaa..0147598e84cb 100644 --- a/versioned_docs/version-2.9/integrations-in-rancher/logging/taints-and-tolerations.md +++ b/versioned_docs/version-2.9/integrations-in-rancher/logging/taints-and-tolerations.md @@ -20,7 +20,7 @@ Both provide choice for the what node(s) the pod will run on. 
- [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) -### Default Implementation in Rancher's Logging Stack +## Default Implementation in Rancher's Logging Stack By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes. The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes. @@ -47,7 +47,7 @@ In the above example, we ensure that our pod only runs on Linux nodes, and we ad You can do the same with Rancher's existing taints, or with your own custom ones. -### Adding NodeSelector Settings and Tolerations for Custom Taints +## Adding NodeSelector Settings and Tolerations for Custom Taints If you would like to add your own `nodeSelector` settings, or if you would like to add `tolerations` for additional taints, you can pass the following to the chart's values. From da0ba61c8b2e069ea81bcb579d057c3913fa56ff Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 11:54:39 -0400 Subject: [PATCH 24/31] fixed longhorn/overview.md --- docs/integrations-in-rancher/longhorn/overview.md | 12 ++++++------ .../integrations-in-rancher/longhorn/overview.md | 12 ++++++------ .../integrations-in-rancher/longhorn/overview.md | 12 ++++++------ 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/integrations-in-rancher/longhorn/overview.md b/docs/integrations-in-rancher/longhorn/overview.md index db7e4a620761..13a581175d23 100644 --- a/docs/integrations-in-rancher/longhorn/overview.md +++ b/docs/integrations-in-rancher/longhorn/overview.md @@ -25,7 +25,7 @@ With Longhorn, you can: ![Longhorn Dashboard](/img/longhorn-screenshot.png) -### Installing Longhorn with Rancher +## Installing Longhorn with Rancher 1. Fulfill all [Installation Requirements.](https://longhorn.io/docs/latest/deploy/install/#installation-requirements) 1. Go to the cluster where you want to install Longhorn. @@ -37,14 +37,14 @@ With Longhorn, you can: **Result:** Longhorn is deployed in the Kubernetes cluster. -### Accessing Longhorn from the Rancher UI +## Accessing Longhorn from the Rancher UI 1. Go to the cluster where Longhorn is installed. In the left navigation menu, click **Longhorn**. 1. On this page, you can edit Kubernetes resources managed by Longhorn. To view the Longhorn UI, click the **Longhorn** button in the **Overview** section. **Result:** You will be taken to the Longhorn UI, where you can manage your Longhorn volumes and their replicas in the Kubernetes cluster, as well as secondary backups of your Longhorn storage that may exist in another Kubernetes cluster or in S3. -### Uninstalling Longhorn from the Rancher UI +## Uninstalling Longhorn from the Rancher UI 1. Go to the cluster where Longhorn is installed and click **Apps**. 1. Click **Installed Apps**. @@ -53,15 +53,15 @@ With Longhorn, you can: **Result:** Longhorn is uninstalled. -### GitHub Repository +## GitHub Repository The Longhorn project is available [here.](https://github.com/longhorn/longhorn) -### Documentation +## Documentation The Longhorn documentation is [here.](https://longhorn.io/docs/) -### Architecture +## Architecture Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. 
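The patch above covers installing Longhorn through the Rancher **Apps** UI. For reference, the equivalent upstream CLI install (using the official Longhorn chart repository) is roughly:

```bash
# Install Longhorn from the upstream chart into its own namespace,
# then confirm that the manager and driver pods come up.
helm repo add longhorn https://charts.longhorn.io
helm repo update
helm install longhorn longhorn/longhorn \
  --namespace longhorn-system --create-namespace
kubectl -n longhorn-system get pods
```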
diff --git a/versioned_docs/version-2.8/integrations-in-rancher/longhorn/overview.md b/versioned_docs/version-2.8/integrations-in-rancher/longhorn/overview.md index db7e4a620761..13a581175d23 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/longhorn/overview.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/longhorn/overview.md @@ -25,7 +25,7 @@ With Longhorn, you can: ![Longhorn Dashboard](/img/longhorn-screenshot.png) -### Installing Longhorn with Rancher +## Installing Longhorn with Rancher 1. Fulfill all [Installation Requirements.](https://longhorn.io/docs/latest/deploy/install/#installation-requirements) 1. Go to the cluster where you want to install Longhorn. @@ -37,14 +37,14 @@ With Longhorn, you can: **Result:** Longhorn is deployed in the Kubernetes cluster. -### Accessing Longhorn from the Rancher UI +## Accessing Longhorn from the Rancher UI 1. Go to the cluster where Longhorn is installed. In the left navigation menu, click **Longhorn**. 1. On this page, you can edit Kubernetes resources managed by Longhorn. To view the Longhorn UI, click the **Longhorn** button in the **Overview** section. **Result:** You will be taken to the Longhorn UI, where you can manage your Longhorn volumes and their replicas in the Kubernetes cluster, as well as secondary backups of your Longhorn storage that may exist in another Kubernetes cluster or in S3. -### Uninstalling Longhorn from the Rancher UI +## Uninstalling Longhorn from the Rancher UI 1. Go to the cluster where Longhorn is installed and click **Apps**. 1. Click **Installed Apps**. @@ -53,15 +53,15 @@ With Longhorn, you can: **Result:** Longhorn is uninstalled. -### GitHub Repository +## GitHub Repository The Longhorn project is available [here.](https://github.com/longhorn/longhorn) -### Documentation +## Documentation The Longhorn documentation is [here.](https://longhorn.io/docs/) -### Architecture +## Architecture Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. diff --git a/versioned_docs/version-2.9/integrations-in-rancher/longhorn/overview.md b/versioned_docs/version-2.9/integrations-in-rancher/longhorn/overview.md index db7e4a620761..13a581175d23 100644 --- a/versioned_docs/version-2.9/integrations-in-rancher/longhorn/overview.md +++ b/versioned_docs/version-2.9/integrations-in-rancher/longhorn/overview.md @@ -25,7 +25,7 @@ With Longhorn, you can: ![Longhorn Dashboard](/img/longhorn-screenshot.png) -### Installing Longhorn with Rancher +## Installing Longhorn with Rancher 1. Fulfill all [Installation Requirements.](https://longhorn.io/docs/latest/deploy/install/#installation-requirements) 1. Go to the cluster where you want to install Longhorn. @@ -37,14 +37,14 @@ With Longhorn, you can: **Result:** Longhorn is deployed in the Kubernetes cluster. -### Accessing Longhorn from the Rancher UI +## Accessing Longhorn from the Rancher UI 1. Go to the cluster where Longhorn is installed. In the left navigation menu, click **Longhorn**. 1. On this page, you can edit Kubernetes resources managed by Longhorn. To view the Longhorn UI, click the **Longhorn** button in the **Overview** section. **Result:** You will be taken to the Longhorn UI, where you can manage your Longhorn volumes and their replicas in the Kubernetes cluster, as well as secondary backups of your Longhorn storage that may exist in another Kubernetes cluster or in S3. 
-### Uninstalling Longhorn from the Rancher UI +## Uninstalling Longhorn from the Rancher UI 1. Go to the cluster where Longhorn is installed and click **Apps**. 1. Click **Installed Apps**. @@ -53,15 +53,15 @@ With Longhorn, you can: **Result:** Longhorn is uninstalled. -### GitHub Repository +## GitHub Repository The Longhorn project is available [here.](https://github.com/longhorn/longhorn) -### Documentation +## Documentation The Longhorn documentation is [here.](https://longhorn.io/docs/) -### Architecture +## Architecture Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. From cc83b6f5e1403c0fc77e8ea87b05dd0317d8b8a8 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 11:58:48 -0400 Subject: [PATCH 25/31] fixed neuvector/overview.md --- .../neuvector/overview.md | 29 ++++++++----------- .../neuvector/overview.md | 29 ++++++++----------- .../neuvector/overview.md | 28 ++++++++---------- 3 files changed, 36 insertions(+), 50 deletions(-) diff --git a/docs/integrations-in-rancher/neuvector/overview.md b/docs/integrations-in-rancher/neuvector/overview.md index cec0d643afdd..cf7ec91cdd9d 100644 --- a/docs/integrations-in-rancher/neuvector/overview.md +++ b/docs/integrations-in-rancher/neuvector/overview.md @@ -6,13 +6,11 @@ title: Overview -### NeuVector Integration in Rancher - [NeuVector 5.x](https://open-docs.neuvector.com/) is an open-source container-centric security platform that is integrated with Rancher. NeuVector offers real-time compliance, visibility, and protection for critical applications and data during runtime. NeuVector provides a firewall, container process/file system monitoring, security auditing with CIS benchmarks, and vulnerability scanning. For more information on Rancher security, please see the [security documentation](../../reference-guides/rancher-security/rancher-security.md). NeuVector can be enabled through a Helm chart that may be installed either through **Apps** or through the **Cluster Tools** button in the Rancher UI. Once the Helm chart is installed, users can easily [deploy and manage NeuVector clusters within Rancher](https://open-docs.neuvector.com/deploying/rancher#deploy-and-manage-neuvector-through-rancher-apps-marketplace). -### Installing NeuVector with Rancher +## Installing NeuVector with Rancher The Harvester Helm Chart is used to manage access to the NeuVector UI in Rancher where users can navigate directly to deploy and manage their NeuVector clusters. @@ -44,12 +42,12 @@ Some examples are as follows: 1. Click on **Cluster Tools** at the bottom of the left navigation bar. 1. Repeat step 4 above to select your container runtime accordingly, then click **Install** again. -### Accessing NeuVector from the Rancher UI +## Accessing NeuVector from the Rancher UI 1. Navigate to the cluster explorer of the cluster where NeuVector is installed. In the left navigation bar, click **NeuVector**. 1. Click the external link to go to the NeuVector UI. Once the link is selected, users must accept the `END USER LICENSE AGREEMENT` to access the NeuVector UI. -### Uninstalling NeuVector from the Rancher UI +## Uninstalling NeuVector from the Rancher UI **To uninstall from Apps:** @@ -62,15 +60,15 @@ Some examples are as follows: 1. Click **☰ > Cluster Management**. 1. Click on **Cluster Tools** at the bottom-left of the screen, then click on the trash can icon under the NeuVector chart. 
Select `Delete the CRD associated with this app` if desired, then click **Delete**. -### GitHub Repository +## GitHub Repository The NeuVector project is available [here](https://github.com/neuvector/neuvector). -### Documentation +## Documentation The NeuVector documentation is [here](https://open-docs.neuvector.com/). -### Architecture +## Architecture The NeuVector security solution contains four types of security containers: Controllers, Enforcers, Managers, and Scanners. A special container called an All-in-One is also provided to combine the Controller, Enforcer, and Manager functions all in one container, primarily for Docker-native deployments. There is also an Updater which, when run, will update the CVE database. @@ -91,7 +89,7 @@ The NeuVector security solution contains four types of security containers: Cont To learn more about NeuVector's architecture, please refer [here](https://open-docs.neuvector.com/basics/overview#architecture). -### CPU and Memory Allocations +## CPU and Memory Allocations Below are the minimum recommended computing resources for the NeuVector chart installation in a default deployment. Note that the resource limit is not set. @@ -105,7 +103,7 @@ Below are the minimum recommended computing resources for the NeuVector chart in \* Minimum 1GB of memory total required for Controller, Manager, and Scanner containers combined. -### Hardened Cluster Support - Calico and Canal +## Hardened Cluster Support - Calico and Canal @@ -162,7 +160,7 @@ Below are the minimum recommended computing resources for the NeuVector chart in -### SELinux-enabled Cluster Support - Calico and Canal +## SELinux-enabled Cluster Support - Calico and Canal To enable SELinux on RKE2 clusters, follow the steps below: @@ -179,12 +177,11 @@ kubectl patch deploy neuvector-scanner-pod -n cattle-neuvector-system --patch '{ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '{"spec":{"jobTemplate":{"spec":{"template":{"spec":{"securityContext":{"runAsUser": 5400}}}}}}}' ``` -### Cluster Support in an Air-Gapped Environment +## Cluster Support in an Air-Gapped Environment - All NeuVector components are deployable on a cluster in an air-gapped environment without any additional configuration needed. - -### Support Limitations +## Support Limitations * Only admins and cluster owners are currently supported. @@ -192,12 +189,10 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch ' * NeuVector is not supported on a Windows cluster. - -### Other Limitations +## Other Limitations * Currently, NeuVector feature chart installation fails when a NeuVector partner chart already exists. To work around this issue, uninstall the NeuVector partner chart and reinstall the NeuVector feature chart. * Sometimes when the controllers are not ready, the NeuVector UI is not accessible from the Rancher UI. During this time, controllers will try to restart, and it takes a few minutes for the controllers to be active. * Container runtime is not auto-detected for different cluster types when installing the NeuVector chart. To work around this, you can specify the runtime manually. 
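For the last limitation above (container runtime not auto-detected), the workaround is to set the runtime in the chart values. This is a sketch only, since value names vary between NeuVector chart versions; it assumes a containerd-based RKE2/K3s cluster and a locally configured `rancher-charts` Helm repository:

```bash
# Hypothetical values: disable the Docker runtime default and point the
# chart at the k3s/RKE2 containerd socket instead.
helm upgrade --install neuvector rancher-charts/neuvector \
  -n cattle-neuvector-system --create-namespace \
  --set docker.enabled=false \
  --set k3s.enabled=true
```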
- diff --git a/versioned_docs/version-2.8/integrations-in-rancher/neuvector/overview.md b/versioned_docs/version-2.8/integrations-in-rancher/neuvector/overview.md index cec0d643afdd..cf7ec91cdd9d 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/neuvector/overview.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/neuvector/overview.md @@ -6,13 +6,11 @@ title: Overview -### NeuVector Integration in Rancher - [NeuVector 5.x](https://open-docs.neuvector.com/) is an open-source container-centric security platform that is integrated with Rancher. NeuVector offers real-time compliance, visibility, and protection for critical applications and data during runtime. NeuVector provides a firewall, container process/file system monitoring, security auditing with CIS benchmarks, and vulnerability scanning. For more information on Rancher security, please see the [security documentation](../../reference-guides/rancher-security/rancher-security.md). NeuVector can be enabled through a Helm chart that may be installed either through **Apps** or through the **Cluster Tools** button in the Rancher UI. Once the Helm chart is installed, users can easily [deploy and manage NeuVector clusters within Rancher](https://open-docs.neuvector.com/deploying/rancher#deploy-and-manage-neuvector-through-rancher-apps-marketplace). -### Installing NeuVector with Rancher +## Installing NeuVector with Rancher The Harvester Helm Chart is used to manage access to the NeuVector UI in Rancher where users can navigate directly to deploy and manage their NeuVector clusters. @@ -44,12 +42,12 @@ Some examples are as follows: 1. Click on **Cluster Tools** at the bottom of the left navigation bar. 1. Repeat step 4 above to select your container runtime accordingly, then click **Install** again. -### Accessing NeuVector from the Rancher UI +## Accessing NeuVector from the Rancher UI 1. Navigate to the cluster explorer of the cluster where NeuVector is installed. In the left navigation bar, click **NeuVector**. 1. Click the external link to go to the NeuVector UI. Once the link is selected, users must accept the `END USER LICENSE AGREEMENT` to access the NeuVector UI. -### Uninstalling NeuVector from the Rancher UI +## Uninstalling NeuVector from the Rancher UI **To uninstall from Apps:** @@ -62,15 +60,15 @@ Some examples are as follows: 1. Click **☰ > Cluster Management**. 1. Click on **Cluster Tools** at the bottom-left of the screen, then click on the trash can icon under the NeuVector chart. Select `Delete the CRD associated with this app` if desired, then click **Delete**. -### GitHub Repository +## GitHub Repository The NeuVector project is available [here](https://github.com/neuvector/neuvector). -### Documentation +## Documentation The NeuVector documentation is [here](https://open-docs.neuvector.com/). -### Architecture +## Architecture The NeuVector security solution contains four types of security containers: Controllers, Enforcers, Managers, and Scanners. A special container called an All-in-One is also provided to combine the Controller, Enforcer, and Manager functions all in one container, primarily for Docker-native deployments. There is also an Updater which, when run, will update the CVE database. @@ -91,7 +89,7 @@ The NeuVector security solution contains four types of security containers: Cont To learn more about NeuVector's architecture, please refer [here](https://open-docs.neuvector.com/basics/overview#architecture). 
-### CPU and Memory Allocations +## CPU and Memory Allocations Below are the minimum recommended computing resources for the NeuVector chart installation in a default deployment. Note that the resource limit is not set. @@ -105,7 +103,7 @@ Below are the minimum recommended computing resources for the NeuVector chart in \* Minimum 1GB of memory total required for Controller, Manager, and Scanner containers combined. -### Hardened Cluster Support - Calico and Canal +## Hardened Cluster Support - Calico and Canal @@ -162,7 +160,7 @@ Below are the minimum recommended computing resources for the NeuVector chart in -### SELinux-enabled Cluster Support - Calico and Canal +## SELinux-enabled Cluster Support - Calico and Canal To enable SELinux on RKE2 clusters, follow the steps below: @@ -179,12 +177,11 @@ kubectl patch deploy neuvector-scanner-pod -n cattle-neuvector-system --patch '{ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '{"spec":{"jobTemplate":{"spec":{"template":{"spec":{"securityContext":{"runAsUser": 5400}}}}}}}' ``` -### Cluster Support in an Air-Gapped Environment +## Cluster Support in an Air-Gapped Environment - All NeuVector components are deployable on a cluster in an air-gapped environment without any additional configuration needed. - -### Support Limitations +## Support Limitations * Only admins and cluster owners are currently supported. @@ -192,12 +189,10 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch ' * NeuVector is not supported on a Windows cluster. - -### Other Limitations +## Other Limitations * Currently, NeuVector feature chart installation fails when a NeuVector partner chart already exists. To work around this issue, uninstall the NeuVector partner chart and reinstall the NeuVector feature chart. * Sometimes when the controllers are not ready, the NeuVector UI is not accessible from the Rancher UI. During this time, controllers will try to restart, and it takes a few minutes for the controllers to be active. * Container runtime is not auto-detected for different cluster types when installing the NeuVector chart. To work around this, you can specify the runtime manually. - diff --git a/versioned_docs/version-2.9/integrations-in-rancher/neuvector/overview.md b/versioned_docs/version-2.9/integrations-in-rancher/neuvector/overview.md index cec0d643afdd..1d3df3bda308 100644 --- a/versioned_docs/version-2.9/integrations-in-rancher/neuvector/overview.md +++ b/versioned_docs/version-2.9/integrations-in-rancher/neuvector/overview.md @@ -6,13 +6,11 @@ title: Overview -### NeuVector Integration in Rancher - [NeuVector 5.x](https://open-docs.neuvector.com/) is an open-source container-centric security platform that is integrated with Rancher. NeuVector offers real-time compliance, visibility, and protection for critical applications and data during runtime. NeuVector provides a firewall, container process/file system monitoring, security auditing with CIS benchmarks, and vulnerability scanning. For more information on Rancher security, please see the [security documentation](../../reference-guides/rancher-security/rancher-security.md). NeuVector can be enabled through a Helm chart that may be installed either through **Apps** or through the **Cluster Tools** button in the Rancher UI. Once the Helm chart is installed, users can easily [deploy and manage NeuVector clusters within Rancher](https://open-docs.neuvector.com/deploying/rancher#deploy-and-manage-neuvector-through-rancher-apps-marketplace). 
-### Installing NeuVector with Rancher +## Installing NeuVector with Rancher The Harvester Helm Chart is used to manage access to the NeuVector UI in Rancher where users can navigate directly to deploy and manage their NeuVector clusters. @@ -44,12 +42,12 @@ Some examples are as follows: 1. Click on **Cluster Tools** at the bottom of the left navigation bar. 1. Repeat step 4 above to select your container runtime accordingly, then click **Install** again. -### Accessing NeuVector from the Rancher UI +## Accessing NeuVector from the Rancher UI 1. Navigate to the cluster explorer of the cluster where NeuVector is installed. In the left navigation bar, click **NeuVector**. 1. Click the external link to go to the NeuVector UI. Once the link is selected, users must accept the `END USER LICENSE AGREEMENT` to access the NeuVector UI. -### Uninstalling NeuVector from the Rancher UI +## Uninstalling NeuVector from the Rancher UI **To uninstall from Apps:** @@ -62,15 +60,15 @@ Some examples are as follows: 1. Click **☰ > Cluster Management**. 1. Click on **Cluster Tools** at the bottom-left of the screen, then click on the trash can icon under the NeuVector chart. Select `Delete the CRD associated with this app` if desired, then click **Delete**. -### GitHub Repository +## GitHub Repository The NeuVector project is available [here](https://github.com/neuvector/neuvector). -### Documentation +## Documentation The NeuVector documentation is [here](https://open-docs.neuvector.com/). -### Architecture +## Architecture The NeuVector security solution contains four types of security containers: Controllers, Enforcers, Managers, and Scanners. A special container called an All-in-One is also provided to combine the Controller, Enforcer, and Manager functions all in one container, primarily for Docker-native deployments. There is also an Updater which, when run, will update the CVE database. @@ -91,7 +89,7 @@ The NeuVector security solution contains four types of security containers: Cont To learn more about NeuVector's architecture, please refer [here](https://open-docs.neuvector.com/basics/overview#architecture). -### CPU and Memory Allocations +## CPU and Memory Allocations Below are the minimum recommended computing resources for the NeuVector chart installation in a default deployment. Note that the resource limit is not set. @@ -105,7 +103,7 @@ Below are the minimum recommended computing resources for the NeuVector chart in \* Minimum 1GB of memory total required for Controller, Manager, and Scanner containers combined. -### Hardened Cluster Support - Calico and Canal +## Hardened Cluster Support - Calico and Canal @@ -162,7 +160,7 @@ Below are the minimum recommended computing resources for the NeuVector chart in -### SELinux-enabled Cluster Support - Calico and Canal +## SELinux-enabled Cluster Support - Calico and Canal To enable SELinux on RKE2 clusters, follow the steps below: @@ -179,12 +177,11 @@ kubectl patch deploy neuvector-scanner-pod -n cattle-neuvector-system --patch '{ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '{"spec":{"jobTemplate":{"spec":{"template":{"spec":{"securityContext":{"runAsUser": 5400}}}}}}}' ``` -### Cluster Support in an Air-Gapped Environment +## Cluster Support in an Air-Gapped Environment - All NeuVector components are deployable on a cluster in an air-gapped environment without any additional configuration needed. - -### Support Limitations +## Support Limitations * Only admins and cluster owners are currently supported. 
@@ -192,8 +189,7 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch ' * NeuVector is not supported on a Windows cluster. - -### Other Limitations +## Other Limitations * Currently, NeuVector feature chart installation fails when a NeuVector partner chart already exists. To work around this issue, uninstall the NeuVector partner chart and reinstall the NeuVector feature chart. From d8b1dc96275eec7b4e6942b11519cb2c3e7c96bf Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 12:13:10 -0400 Subject: [PATCH 26/31] fixed monitoring-and-alerting --- .../monitoring-and-alerting/monitoring-and-alerting.md | 3 +-- .../monitoring-and-alerting/monitoring-and-alerting.md | 3 +-- .../monitoring-and-alerting/monitoring-and-alerting.md | 4 +--- .../monitoring-and-alerting/monitoring-and-alerting.md | 3 +-- .../monitoring-and-alerting/monitoring-and-alerting.md | 3 +-- .../monitoring-and-alerting/monitoring-and-alerting.md | 3 +-- 6 files changed, 6 insertions(+), 13 deletions(-) diff --git a/docs/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md b/docs/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md index da6460a0da70..79b243cb78de 100644 --- a/docs/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md +++ b/docs/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md @@ -15,7 +15,7 @@ For information on V1 monitoring and alerting, available in Rancher v2.2 up to v Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. -### Features +## Features Prometheus lets you view metrics from your Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or Grafana, which is an analytics viewing platform deployed along with Prometheus. @@ -97,7 +97,6 @@ To be able to fully deploy Monitoring V2 for Windows, all of your Windows hosts For more details on how to upgrade wins on existing Windows hosts, see [Windows cluster support for Monitoring V2.](windows-support.md). - ## Known Issues There is a [known issue](https://github.com/rancher/rancher/issues/28787#issuecomment-693611821) that K3s clusters require more than the allotted default memory. If you enable monitoring on a K3s cluster, set `prometheus.prometheusSpec.resources.memory.limit` to 2500 Mi and `prometheus.prometheusSpec.resources.memory.request` to 1750 Mi. diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md index d5f65a61fb9e..d73044405e8d 100644 --- a/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md @@ -9,8 +9,7 @@ description: Prometheus lets you view metrics from your different Rancher and Ku Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. - -### Features +## Features Prometheus lets you view metrics from your Rancher and Kubernetes objects. 
Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or Grafana, which is an analytics viewing platform deployed along with Prometheus. diff --git a/versioned_docs/version-2.6/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md b/versioned_docs/version-2.6/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md index 30126f607414..3960c691d013 100644 --- a/versioned_docs/version-2.6/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md +++ b/versioned_docs/version-2.6/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md @@ -9,8 +9,7 @@ description: Prometheus lets you view metrics from your different Rancher and Ku Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. - -### Features +## Features Prometheus lets you view metrics from your Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or Grafana, which is an analytics viewing platform deployed along with Prometheus. @@ -91,7 +90,6 @@ To be able to fully deploy Monitoring V2 for Windows, all of your Windows hosts For more details on how to upgrade wins on existing Windows hosts, refer to the section on [Windows cluster support for Monitoring V2.](windows-support.md) - ## Known Issues There is a [known issue](https://github.com/rancher/rancher/issues/28787#issuecomment-693611821) that K3s clusters require more default memory. If you are enabling monitoring on a K3s cluster, we recommend to setting `prometheus.prometheusSpec.resources.memory.limit` to 2500 Mi and `prometheus.prometheusSpec.resources.memory.request` to 1750 Mi. diff --git a/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md b/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md index da6460a0da70..79b243cb78de 100644 --- a/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md +++ b/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md @@ -15,7 +15,7 @@ For information on V1 monitoring and alerting, available in Rancher v2.2 up to v Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. -### Features +## Features Prometheus lets you view metrics from your Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or Grafana, which is an analytics viewing platform deployed along with Prometheus. @@ -97,7 +97,6 @@ To be able to fully deploy Monitoring V2 for Windows, all of your Windows hosts For more details on how to upgrade wins on existing Windows hosts, see [Windows cluster support for Monitoring V2.](windows-support.md). - ## Known Issues There is a [known issue](https://github.com/rancher/rancher/issues/28787#issuecomment-693611821) that K3s clusters require more than the allotted default memory. If you enable monitoring on a K3s cluster, set `prometheus.prometheusSpec.resources.memory.limit` to 2500 Mi and `prometheus.prometheusSpec.resources.memory.request` to 1750 Mi. 
diff --git a/versioned_docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md b/versioned_docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md index da6460a0da70..79b243cb78de 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md @@ -15,7 +15,7 @@ For information on V1 monitoring and alerting, available in Rancher v2.2 up to v Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. -### Features +## Features Prometheus lets you view metrics from your Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or Grafana, which is an analytics viewing platform deployed along with Prometheus. @@ -97,7 +97,6 @@ To be able to fully deploy Monitoring V2 for Windows, all of your Windows hosts For more details on how to upgrade wins on existing Windows hosts, see [Windows cluster support for Monitoring V2.](windows-support.md). - ## Known Issues There is a [known issue](https://github.com/rancher/rancher/issues/28787#issuecomment-693611821) that K3s clusters require more than the allotted default memory. If you enable monitoring on a K3s cluster, set `prometheus.prometheusSpec.resources.memory.limit` to 2500 Mi and `prometheus.prometheusSpec.resources.memory.request` to 1750 Mi. diff --git a/versioned_docs/version-2.9/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md b/versioned_docs/version-2.9/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md index da6460a0da70..79b243cb78de 100644 --- a/versioned_docs/version-2.9/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md +++ b/versioned_docs/version-2.9/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md @@ -15,7 +15,7 @@ For information on V1 monitoring and alerting, available in Rancher v2.2 up to v Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. -### Features +## Features Prometheus lets you view metrics from your Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or Grafana, which is an analytics viewing platform deployed along with Prometheus. @@ -97,7 +97,6 @@ To be able to fully deploy Monitoring V2 for Windows, all of your Windows hosts For more details on how to upgrade wins on existing Windows hosts, see [Windows cluster support for Monitoring V2.](windows-support.md). - ## Known Issues There is a [known issue](https://github.com/rancher/rancher/issues/28787#issuecomment-693611821) that K3s clusters require more than the allotted default memory. If you enable monitoring on a K3s cluster, set `prometheus.prometheusSpec.resources.memory.limit` to 2500 Mi and `prometheus.prometheusSpec.resources.memory.request` to 1750 Mi. 
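For the K3s known issue described above, one way to apply the recommended memory settings is through the chart's Helm values. The snippet below is a sketch that assumes the chart follows the standard Kubernetes `requests`/`limits` resource schema under `prometheus.prometheusSpec.resources`; check the `rancher-monitoring` values in your version for the exact layout.

```yaml
# Illustrative rancher-monitoring values for a K3s cluster,
# matching the 2500 Mi limit / 1750 Mi request guidance above.
prometheus:
  prometheusSpec:
    resources:
      limits:
        memory: 2500Mi   # upper bound for the Prometheus pod
      requests:
        memory: 1750Mi   # amount reserved at scheduling time
```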
From 32c90ce05281ac5d6f45fa3634e9fd78b566cc3b Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 12:17:14 -0400 Subject: [PATCH 27/31] fixed rancher-cli.md --- .../cli-with-rancher/rancher-cli.md | 14 +++++++------- .../cli-with-rancher/rancher-cli.md | 15 +++++++-------- .../cli-with-rancher/rancher-cli.md | 14 +++++++------- .../cli-with-rancher/rancher-cli.md | 15 +++++++-------- .../cli-with-rancher/rancher-cli.md | 15 +++++++-------- .../cli-with-rancher/rancher-cli.md | 14 +++++++------- .../cli-with-rancher/rancher-cli.md | 14 +++++++------- 7 files changed, 49 insertions(+), 52 deletions(-) diff --git a/docs/reference-guides/cli-with-rancher/rancher-cli.md b/docs/reference-guides/cli-with-rancher/rancher-cli.md index e93d9805480d..adff436c8e5b 100644 --- a/docs/reference-guides/cli-with-rancher/rancher-cli.md +++ b/docs/reference-guides/cli-with-rancher/rancher-cli.md @@ -9,7 +9,7 @@ description: Interact with Rancher using command line interface (CLI) tools from The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. -### Download Rancher CLI +## Download Rancher CLI The binary can be downloaded directly from the UI. @@ -17,14 +17,14 @@ The binary can be downloaded directly from the UI. 1. At the bottom of the navigation sidebar menu, click **About**. 1. Under the **CLI Downloads section**, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. -### Requirements +## Requirements After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - Your Rancher Server URL, which is used to connect to Rancher Server. - An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../user-settings/api-keys.md). -### CLI Authentication +## CLI Authentication Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): @@ -34,7 +34,7 @@ $ ./rancher login https:// --token If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. -### Project Selection +## Project Selection Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. @@ -58,7 +58,7 @@ INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json Ensure you can run `rancher kubectl get pods` successfully. -### Commands +## Commands The following commands are available for use in Rancher CLI. @@ -86,12 +86,12 @@ The following commands are available for use in Rancher CLI. | `token` | Authenticates and generates new kubeconfig token. | | `help, [h]` | Shows a list of commands or help for one command. | -### Rancher CLI Help +## Rancher CLI Help Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. All commands accept the `--help` flag, which documents each command's usage. 
-### Limitations +## Limitations The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md). diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/rancher-cli.md b/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/rancher-cli.md index a0d5712bcab3..c79f90d0be49 100644 --- a/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/rancher-cli.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/rancher-cli.md @@ -9,7 +9,7 @@ description: Interact with Rancher using command line interface (CLI) tools from The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. -### Download Rancher CLI +## Download Rancher CLI The binary can be downloaded directly from the UI. The link can be found in the right hand side of the footer in the UI. We have binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. @@ -17,14 +17,14 @@ The binary can be downloaded directly from the UI. The link can be found in the 1. At the bottom, click **v2.6.x**, where **v2.6.x** is a hyperlinked text indicating the installed Rancher version. 1. Under the **CLI Downloads section**, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. -### Requirements +## Requirements After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - Your Rancher Server URL, which is used to connect to Rancher Server. - An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../user-settings/api-keys.md). -### CLI Authentication +## CLI Authentication Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): @@ -34,7 +34,7 @@ $ ./rancher login https:// --token If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. -### Project Selection +## Project Selection Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](../../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. @@ -58,7 +58,7 @@ INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json Ensure you can run `rancher kubectl get pods` successfully. -### Commands +## Commands The following commands are available for use in Rancher CLI. @@ -79,13 +79,12 @@ The following commands are available for use in Rancher CLI. | `ssh` | Connects to one of your cluster nodes using the SSH protocol. | | `help, [h]` | Shows a list of commands or help for one command. | - -### Rancher CLI Help +## Rancher CLI Help Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. All commands accept the `--help` flag, which documents each command's usage. 
-### Limitations +## Limitations The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md). \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/cli-with-rancher/rancher-cli.md b/versioned_docs/version-2.5/reference-guides/cli-with-rancher/rancher-cli.md index 82c3a255b49d..732868826c68 100644 --- a/versioned_docs/version-2.5/reference-guides/cli-with-rancher/rancher-cli.md +++ b/versioned_docs/version-2.5/reference-guides/cli-with-rancher/rancher-cli.md @@ -9,18 +9,18 @@ description: Interact with Rancher using command line interface (CLI) tools from The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. -### Download Rancher CLI +## Download Rancher CLI The binary can be downloaded directly from the UI. The link can be found in the right hand side of the footer in the UI. We have binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. -### Requirements +## Requirements After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - Your Rancher Server URL, which is used to connect to Rancher Server. - An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../../reference-guides/user-settings/api-keys.md). -### CLI Authentication +## CLI Authentication Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): @@ -30,7 +30,7 @@ $ ./rancher login https:// --token If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. -### Project Selection +## Project Selection Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](../../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. @@ -54,7 +54,7 @@ INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json Ensure you can run `rancher kubectl get pods` successfully. -### Commands +## Commands The following commands are available for use in Rancher CLI. @@ -76,12 +76,12 @@ The following commands are available for use in Rancher CLI. | `help, [h]` | Shows a list of commands or help for one command. | -### Rancher CLI Help +## Rancher CLI Help Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. All commands accept the `--help` flag, which documents each command's usage. -### Limitations +## Limitations The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](../../how-to-guides/new-user-guides/helm-charts-in-rancher.md). 
diff --git a/versioned_docs/version-2.6/reference-guides/cli-with-rancher/rancher-cli.md b/versioned_docs/version-2.6/reference-guides/cli-with-rancher/rancher-cli.md index 17965c67cd17..b31257f26d9b 100644 --- a/versioned_docs/version-2.6/reference-guides/cli-with-rancher/rancher-cli.md +++ b/versioned_docs/version-2.6/reference-guides/cli-with-rancher/rancher-cli.md @@ -9,7 +9,7 @@ description: Interact with Rancher using command line interface (CLI) tools from The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. -### Download Rancher CLI +## Download Rancher CLI The binary can be downloaded directly from the UI. @@ -17,14 +17,14 @@ The binary can be downloaded directly from the UI. 1. At the bottom, click **v2.6.x**, where **v2.6.x** is a hyperlinked text indicating the installed Rancher version. 1. Under the **CLI Downloads section**, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. -### Requirements +## Requirements After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - Your Rancher Server URL, which is used to connect to Rancher Server. - An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../user-settings/api-keys.md). -### CLI Authentication +## CLI Authentication Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): @@ -34,7 +34,7 @@ $ ./rancher login https:// --token If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. -### Project Selection +## Project Selection Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. @@ -58,7 +58,7 @@ INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json Ensure you can run `rancher kubectl get pods` successfully. -### Commands +## Commands The following commands are available for use in Rancher CLI. @@ -86,13 +86,12 @@ The following commands are available for use in Rancher CLI. | `token` | Authenticates and generates new kubeconfig token. | | `help, [h]` | Shows a list of commands or help for one command. | - -### Rancher CLI Help +## Rancher CLI Help Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. All commands accept the `--help` flag, which documents each command's usage. -### Limitations +## Limitations The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md). 
diff --git a/versioned_docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md b/versioned_docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md index 9f5ff644b5a7..2c9209cbb1e8 100644 --- a/versioned_docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md +++ b/versioned_docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md @@ -9,7 +9,7 @@ description: Interact with Rancher using command line interface (CLI) tools from The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. -### Download Rancher CLI +## Download Rancher CLI The binary can be downloaded directly from the UI. @@ -17,14 +17,14 @@ The binary can be downloaded directly from the UI. 1. At the bottom of the navigation sidebar menu, click **About**. 1. Under the **CLI Downloads section**, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. -### Requirements +## Requirements After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - Your Rancher Server URL, which is used to connect to Rancher Server. - An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../user-settings/api-keys.md). -### CLI Authentication +## CLI Authentication Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): @@ -34,7 +34,7 @@ $ ./rancher login https:// --token If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. -### Project Selection +## Project Selection Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. @@ -58,7 +58,7 @@ INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json Ensure you can run `rancher kubectl get pods` successfully. -### Commands +## Commands The following commands are available for use in Rancher CLI. @@ -86,13 +86,12 @@ The following commands are available for use in Rancher CLI. | `token` | Authenticates and generates new kubeconfig token. | | `help, [h]` | Shows a list of commands or help for one command. | - -### Rancher CLI Help +## Rancher CLI Help Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. All commands accept the `--help` flag, which documents each command's usage. -### Limitations +## Limitations The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md). 
diff --git a/versioned_docs/version-2.8/reference-guides/cli-with-rancher/rancher-cli.md b/versioned_docs/version-2.8/reference-guides/cli-with-rancher/rancher-cli.md index e93d9805480d..adff436c8e5b 100644 --- a/versioned_docs/version-2.8/reference-guides/cli-with-rancher/rancher-cli.md +++ b/versioned_docs/version-2.8/reference-guides/cli-with-rancher/rancher-cli.md @@ -9,7 +9,7 @@ description: Interact with Rancher using command line interface (CLI) tools from The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. -### Download Rancher CLI +## Download Rancher CLI The binary can be downloaded directly from the UI. @@ -17,14 +17,14 @@ The binary can be downloaded directly from the UI. 1. At the bottom of the navigation sidebar menu, click **About**. 1. Under the **CLI Downloads section**, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. -### Requirements +## Requirements After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - Your Rancher Server URL, which is used to connect to Rancher Server. - An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../user-settings/api-keys.md). -### CLI Authentication +## CLI Authentication Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): @@ -34,7 +34,7 @@ $ ./rancher login https:// --token If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. -### Project Selection +## Project Selection Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. @@ -58,7 +58,7 @@ INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json Ensure you can run `rancher kubectl get pods` successfully. -### Commands +## Commands The following commands are available for use in Rancher CLI. @@ -86,12 +86,12 @@ The following commands are available for use in Rancher CLI. | `token` | Authenticates and generates new kubeconfig token. | | `help, [h]` | Shows a list of commands or help for one command. | -### Rancher CLI Help +## Rancher CLI Help Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. All commands accept the `--help` flag, which documents each command's usage. -### Limitations +## Limitations The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md). 
diff --git a/versioned_docs/version-2.9/reference-guides/cli-with-rancher/rancher-cli.md b/versioned_docs/version-2.9/reference-guides/cli-with-rancher/rancher-cli.md index e93d9805480d..adff436c8e5b 100644 --- a/versioned_docs/version-2.9/reference-guides/cli-with-rancher/rancher-cli.md +++ b/versioned_docs/version-2.9/reference-guides/cli-with-rancher/rancher-cli.md @@ -9,7 +9,7 @@ description: Interact with Rancher using command line interface (CLI) tools from The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. -### Download Rancher CLI +## Download Rancher CLI The binary can be downloaded directly from the UI. @@ -17,14 +17,14 @@ The binary can be downloaded directly from the UI. 1. At the bottom of the navigation sidebar menu, click **About**. 1. Under the **CLI Downloads section**, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. -### Requirements +## Requirements After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - Your Rancher Server URL, which is used to connect to Rancher Server. - An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../user-settings/api-keys.md). -### CLI Authentication +## CLI Authentication Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): @@ -34,7 +34,7 @@ $ ./rancher login https:// --token If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. -### Project Selection +## Project Selection Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. @@ -58,7 +58,7 @@ INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json Ensure you can run `rancher kubectl get pods` successfully. -### Commands +## Commands The following commands are available for use in Rancher CLI. @@ -86,12 +86,12 @@ The following commands are available for use in Rancher CLI. | `token` | Authenticates and generates new kubeconfig token. | | `help, [h]` | Shows a list of commands or help for one command. | -### Rancher CLI Help +## Rancher CLI Help Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. All commands accept the `--help` flag, which documents each command's usage. -### Limitations +## Limitations The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md). 
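To tie the CLI steps above together, a typical first session looks roughly like the following. The server URL and token are placeholders, and the project chosen by `context switch` depends on what is listed in your environment.

```sh
# Log in to the Rancher server with an API bearer token (placeholder values shown).
./rancher login https://rancher.example.com --token token-abc12:xyz

# Pick the project that later commands run against.
./rancher context switch

# Verify the session by running kubectl through the CLI.
./rancher kubectl get pods
```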
From 580ba5910c1f7c6f68a01862def6a8368201e0fa Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 14:19:50 -0400 Subject: [PATCH 28/31] fixed cluster-configuration.md --- .../cluster-configuration/cluster-configuration.md | 4 ++-- .../cluster-configuration/cluster-configuration.md | 11 ++++------- .../cluster-configuration/cluster-configuration.md | 4 ++-- .../cluster-configuration/cluster-configuration.md | 4 ++-- .../cluster-configuration/cluster-configuration.md | 4 ++-- .../cluster-configuration/cluster-configuration.md | 4 ++-- .../cluster-configuration/cluster-configuration.md | 4 ++-- 7 files changed, 16 insertions(+), 19 deletions(-) diff --git a/docs/reference-guides/cluster-configuration/cluster-configuration.md b/docs/reference-guides/cluster-configuration/cluster-configuration.md index 139023b17d70..8abd3377435a 100644 --- a/docs/reference-guides/cluster-configuration/cluster-configuration.md +++ b/docs/reference-guides/cluster-configuration/cluster-configuration.md @@ -10,7 +10,7 @@ After you provision a Kubernetes cluster using Rancher, you can still edit optio For information on editing cluster membership, go to [this page.](../../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) -### Cluster Configuration References +## Cluster Configuration References The cluster configuration options depend on the type of Kubernetes cluster: @@ -21,7 +21,7 @@ The cluster configuration options depend on the type of Kubernetes cluster: - [GKE Cluster Configuration](rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md) - [AKS Cluster Configuration](rancher-server-configuration/aks-cluster-configuration.md) -### Cluster Management Capabilities by Cluster Type +## Cluster Management Capabilities by Cluster Type The options and settings available for an existing cluster change based on the method that you used to provision it. diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/cluster-configuration.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/cluster-configuration.md index 5508edbcd0ef..3a24fb846c56 100644 --- a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/cluster-configuration.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/cluster-configuration.md @@ -15,7 +15,7 @@ For information on editing cluster membership, go to [this page.](../../how-to-g - [Editing Clusters with YAML](#editing-clusters-with-yaml) - [Updating ingress-nginx](#updating-ingress-nginx) -### Cluster Management Capabilities by Cluster Type +## Cluster Management Capabilities by Cluster Type The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) have **Cluster Options** available for editing. @@ -25,7 +25,7 @@ import ClusterCapabilitiesTable from '../../shared-files/_cluster-capabilities-t -### Editing Clusters in the Rancher UI +## Editing Clusters in the Rancher UI To edit your cluster, open the **Global** view, make sure the **Clusters** tab is selected, and then select **⋮ > Edit** for the cluster that you want to edit. 
@@ -46,7 +46,7 @@ Option | Description | Default Pod Security Policy | If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. | Cloud Provider | If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/set-up-cloud-providers.md) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. | -### Editing Clusters with YAML +## Editing Clusters with YAML Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. @@ -63,10 +63,7 @@ In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config f >**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. - - - -### Updating ingress-nginx +## Updating ingress-nginx Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. diff --git a/versioned_docs/version-2.5/reference-guides/cluster-configuration/cluster-configuration.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/cluster-configuration.md index 4a0fc2386041..a68621b0d25b 100644 --- a/versioned_docs/version-2.5/reference-guides/cluster-configuration/cluster-configuration.md +++ b/versioned_docs/version-2.5/reference-guides/cluster-configuration/cluster-configuration.md @@ -10,7 +10,7 @@ After you provision a Kubernetes cluster using Rancher, you can still edit optio For information on editing cluster membership, go to [this page.](../../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) -### Cluster Configuration References +## Cluster Configuration References The cluster configuration options depend on the type of Kubernetes cluster: @@ -18,7 +18,7 @@ The cluster configuration options depend on the type of Kubernetes cluster: - [EKS Cluster Configuration](rancher-server-configuration/eks-cluster-configuration.md) - [GKE Cluster Configuration](rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md) -### Cluster Management Capabilities by Cluster Type +## Cluster Management Capabilities by Cluster Type The options and settings available for an existing cluster change based on the method that you used to provision it. 
diff --git a/versioned_docs/version-2.6/reference-guides/cluster-configuration/cluster-configuration.md b/versioned_docs/version-2.6/reference-guides/cluster-configuration/cluster-configuration.md index fb16d619292e..2666d6a1bace 100644 --- a/versioned_docs/version-2.6/reference-guides/cluster-configuration/cluster-configuration.md +++ b/versioned_docs/version-2.6/reference-guides/cluster-configuration/cluster-configuration.md @@ -10,7 +10,7 @@ After you provision a Kubernetes cluster using Rancher, you can still edit optio For information on editing cluster membership, go to [this page.](../../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) -### Cluster Configuration References +## Cluster Configuration References The cluster configuration options depend on the type of Kubernetes cluster: @@ -21,7 +21,7 @@ The cluster configuration options depend on the type of Kubernetes cluster: - [GKE Cluster Configuration](rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md) - [AKS Cluster Configuration](rancher-server-configuration/aks-cluster-configuration.md) -### Cluster Management Capabilities by Cluster Type +## Cluster Management Capabilities by Cluster Type The options and settings available for an existing cluster change based on the method that you used to provision it. diff --git a/versioned_docs/version-2.7/reference-guides/cluster-configuration/cluster-configuration.md b/versioned_docs/version-2.7/reference-guides/cluster-configuration/cluster-configuration.md index fb16d619292e..2666d6a1bace 100644 --- a/versioned_docs/version-2.7/reference-guides/cluster-configuration/cluster-configuration.md +++ b/versioned_docs/version-2.7/reference-guides/cluster-configuration/cluster-configuration.md @@ -10,7 +10,7 @@ After you provision a Kubernetes cluster using Rancher, you can still edit optio For information on editing cluster membership, go to [this page.](../../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) -### Cluster Configuration References +## Cluster Configuration References The cluster configuration options depend on the type of Kubernetes cluster: @@ -21,7 +21,7 @@ The cluster configuration options depend on the type of Kubernetes cluster: - [GKE Cluster Configuration](rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md) - [AKS Cluster Configuration](rancher-server-configuration/aks-cluster-configuration.md) -### Cluster Management Capabilities by Cluster Type +## Cluster Management Capabilities by Cluster Type The options and settings available for an existing cluster change based on the method that you used to provision it. 
diff --git a/versioned_docs/version-2.8/reference-guides/cluster-configuration/cluster-configuration.md b/versioned_docs/version-2.8/reference-guides/cluster-configuration/cluster-configuration.md index fb16d619292e..2666d6a1bace 100644 --- a/versioned_docs/version-2.8/reference-guides/cluster-configuration/cluster-configuration.md +++ b/versioned_docs/version-2.8/reference-guides/cluster-configuration/cluster-configuration.md @@ -10,7 +10,7 @@ After you provision a Kubernetes cluster using Rancher, you can still edit optio For information on editing cluster membership, go to [this page.](../../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) -### Cluster Configuration References +## Cluster Configuration References The cluster configuration options depend on the type of Kubernetes cluster: @@ -21,7 +21,7 @@ The cluster configuration options depend on the type of Kubernetes cluster: - [GKE Cluster Configuration](rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md) - [AKS Cluster Configuration](rancher-server-configuration/aks-cluster-configuration.md) -### Cluster Management Capabilities by Cluster Type +## Cluster Management Capabilities by Cluster Type The options and settings available for an existing cluster change based on the method that you used to provision it. diff --git a/versioned_docs/version-2.9/reference-guides/cluster-configuration/cluster-configuration.md b/versioned_docs/version-2.9/reference-guides/cluster-configuration/cluster-configuration.md index 139023b17d70..8abd3377435a 100644 --- a/versioned_docs/version-2.9/reference-guides/cluster-configuration/cluster-configuration.md +++ b/versioned_docs/version-2.9/reference-guides/cluster-configuration/cluster-configuration.md @@ -10,7 +10,7 @@ After you provision a Kubernetes cluster using Rancher, you can still edit optio For information on editing cluster membership, go to [this page.](../../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) -### Cluster Configuration References +## Cluster Configuration References The cluster configuration options depend on the type of Kubernetes cluster: @@ -21,7 +21,7 @@ The cluster configuration options depend on the type of Kubernetes cluster: - [GKE Cluster Configuration](rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md) - [AKS Cluster Configuration](rancher-server-configuration/aks-cluster-configuration.md) -### Cluster Management Capabilities by Cluster Type +## Cluster Management Capabilities by Cluster Type The options and settings available for an existing cluster change based on the method that you used to provision it. 
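For the "Editing Clusters with YAML" discussion above, a minimal RKE-style snippet is sketched below. It mainly illustrates the underscore naming called out in the note (`kube_api`, `kube_controller`); the CIDRs are examples, and the top-level structure shown in the Rancher YAML editor may wrap these options differently depending on the Rancher version.

```yaml
# Illustrative RKE cluster options; not a complete cluster configuration.
services:
  kube_api:
    service_cluster_ip_range: 10.43.0.0/16   # example service CIDR, adjust to your network plan
  kube_controller:
    cluster_cidr: 10.42.0.0/16               # example pod CIDR
ingress:
  provider: nginx                            # matches the ingress-nginx discussion above
```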
From b4cc408404695c34f887b6bb03a7d6223b24c032 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 14:23:03 -0400 Subject: [PATCH 29/31] fixed monitoring-v2-configuration/examples.md --- .../monitoring-v2-configuration/examples.md | 8 ++++---- .../monitoring-v2-configuration/examples.md | 8 ++++---- .../monitoring-v2-configuration/examples.md | 8 ++++---- .../monitoring-v2-configuration/examples.md | 8 ++++---- .../monitoring-v2-configuration/examples.md | 8 ++++---- .../monitoring-v2-configuration/examples.md | 8 ++++---- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/docs/reference-guides/monitoring-v2-configuration/examples.md b/docs/reference-guides/monitoring-v2-configuration/examples.md index 1660d70a31f1..04b17ca9f5e3 100644 --- a/docs/reference-guides/monitoring-v2-configuration/examples.md +++ b/docs/reference-guides/monitoring-v2-configuration/examples.md @@ -6,15 +6,15 @@ title: Monitoring Configuration Examples -### ServiceMonitor +## ServiceMonitor See the official prometheus-operator GitHub repo for an example [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) YAML. -### PodMonitor +## PodMonitor See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/getting-started/#using-podmonitors) for an example PodMonitor and an example Prometheus resource that refers to a PodMonitor. -### PrometheusRule +## PrometheusRule A PrometheusRule contains the alerting and recording rules that you would usually place in a [Prometheus rule file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). @@ -22,6 +22,6 @@ For a more fine-grained approach, the `ruleSelector` field on a Prometheus resou See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/alerting/) for an example PrometheusRule. -### Alertmanager Config +## Alertmanager Config See the Rancher docs page on Receivers for an example [Alertmanager config](./receivers.md#example-alertmanager-configs). diff --git a/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/examples.md b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/examples.md index d6ef19202562..31ae3fa66bd3 100644 --- a/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/examples.md +++ b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/examples.md @@ -6,15 +6,15 @@ title: Monitoring V2 Configuration Examples -### ServiceMonitor +## ServiceMonitor See the official prometheus-operator GitHub repo for an example [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) YAML. -### PodMonitor +## PodMonitor See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/getting-started/#using-podmonitors) for an example PodMonitor and an example Prometheus resource that refers to a PodMonitor. -### PrometheusRule +## PrometheusRule A PrometheusRule contains the alerting and recording rules that you would usually place in a [Prometheus rule file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). 
@@ -22,6 +22,6 @@ For a more fine-grained approach, the `ruleSelector` field on a Prometheus resou See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/alerting/) for an example PrometheusRule. -### Alertmanager Config +## Alertmanager Config See the Rancher docs page on Receivers for an example [Alertmanager config](./receivers.md#example-alertmanager-configs). diff --git a/versioned_docs/version-2.6/reference-guides/monitoring-v2-configuration/examples.md b/versioned_docs/version-2.6/reference-guides/monitoring-v2-configuration/examples.md index 1660d70a31f1..04b17ca9f5e3 100644 --- a/versioned_docs/version-2.6/reference-guides/monitoring-v2-configuration/examples.md +++ b/versioned_docs/version-2.6/reference-guides/monitoring-v2-configuration/examples.md @@ -6,15 +6,15 @@ title: Monitoring Configuration Examples -### ServiceMonitor +## ServiceMonitor See the official prometheus-operator GitHub repo for an example [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) YAML. -### PodMonitor +## PodMonitor See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/getting-started/#using-podmonitors) for an example PodMonitor and an example Prometheus resource that refers to a PodMonitor. -### PrometheusRule +## PrometheusRule A PrometheusRule contains the alerting and recording rules that you would usually place in a [Prometheus rule file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). @@ -22,6 +22,6 @@ For a more fine-grained approach, the `ruleSelector` field on a Prometheus resou See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/alerting/) for an example PrometheusRule. -### Alertmanager Config +## Alertmanager Config See the Rancher docs page on Receivers for an example [Alertmanager config](./receivers.md#example-alertmanager-configs). diff --git a/versioned_docs/version-2.7/reference-guides/monitoring-v2-configuration/examples.md b/versioned_docs/version-2.7/reference-guides/monitoring-v2-configuration/examples.md index 1660d70a31f1..04b17ca9f5e3 100644 --- a/versioned_docs/version-2.7/reference-guides/monitoring-v2-configuration/examples.md +++ b/versioned_docs/version-2.7/reference-guides/monitoring-v2-configuration/examples.md @@ -6,15 +6,15 @@ title: Monitoring Configuration Examples -### ServiceMonitor +## ServiceMonitor See the official prometheus-operator GitHub repo for an example [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) YAML. -### PodMonitor +## PodMonitor See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/getting-started/#using-podmonitors) for an example PodMonitor and an example Prometheus resource that refers to a PodMonitor. -### PrometheusRule +## PrometheusRule A PrometheusRule contains the alerting and recording rules that you would usually place in a [Prometheus rule file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). @@ -22,6 +22,6 @@ For a more fine-grained approach, the `ruleSelector` field on a Prometheus resou See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/alerting/) for an example PrometheusRule. 
-### Alertmanager Config +## Alertmanager Config See the Rancher docs page on Receivers for an example [Alertmanager config](./receivers.md#example-alertmanager-configs). diff --git a/versioned_docs/version-2.8/reference-guides/monitoring-v2-configuration/examples.md b/versioned_docs/version-2.8/reference-guides/monitoring-v2-configuration/examples.md index 1660d70a31f1..04b17ca9f5e3 100644 --- a/versioned_docs/version-2.8/reference-guides/monitoring-v2-configuration/examples.md +++ b/versioned_docs/version-2.8/reference-guides/monitoring-v2-configuration/examples.md @@ -6,15 +6,15 @@ title: Monitoring Configuration Examples -### ServiceMonitor +## ServiceMonitor See the official prometheus-operator GitHub repo for an example [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) YAML. -### PodMonitor +## PodMonitor See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/getting-started/#using-podmonitors) for an example PodMonitor and an example Prometheus resource that refers to a PodMonitor. -### PrometheusRule +## PrometheusRule A PrometheusRule contains the alerting and recording rules that you would usually place in a [Prometheus rule file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). @@ -22,6 +22,6 @@ For a more fine-grained approach, the `ruleSelector` field on a Prometheus resou See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/alerting/) for an example PrometheusRule. -### Alertmanager Config +## Alertmanager Config See the Rancher docs page on Receivers for an example [Alertmanager config](./receivers.md#example-alertmanager-configs). diff --git a/versioned_docs/version-2.9/reference-guides/monitoring-v2-configuration/examples.md b/versioned_docs/version-2.9/reference-guides/monitoring-v2-configuration/examples.md index 1660d70a31f1..04b17ca9f5e3 100644 --- a/versioned_docs/version-2.9/reference-guides/monitoring-v2-configuration/examples.md +++ b/versioned_docs/version-2.9/reference-guides/monitoring-v2-configuration/examples.md @@ -6,15 +6,15 @@ title: Monitoring Configuration Examples -### ServiceMonitor +## ServiceMonitor See the official prometheus-operator GitHub repo for an example [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) YAML. -### PodMonitor +## PodMonitor See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/getting-started/#using-podmonitors) for an example PodMonitor and an example Prometheus resource that refers to a PodMonitor. -### PrometheusRule +## PrometheusRule A PrometheusRule contains the alerting and recording rules that you would usually place in a [Prometheus rule file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). @@ -22,6 +22,6 @@ For a more fine-grained approach, the `ruleSelector` field on a Prometheus resou See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/alerting/) for an example PrometheusRule. -### Alertmanager Config +## Alertmanager Config See the Rancher docs page on Receivers for an example [Alertmanager config](./receivers.md#example-alertmanager-configs). 
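Since every version of the page points at the Receivers docs for a full Alertmanager config, a hedged sketch of the general route/receiver layout may still be useful here; the receiver name and webhook URL are placeholders, not values from the patch:

```yaml
# Minimal alertmanager.yaml sketch. Replace the receiver and URL with real values.
route:
  group_by: ["alertname"]
  group_wait: 30s
  group_interval: 5m
  repeat_interval: 4h
  receiver: default-webhook        # every alert falls through to this receiver
receivers:
  - name: default-webhook
    webhook_configs:
      - url: http://alert-sink.example.svc.cluster.local:8080/   # placeholder endpoint
        send_resolved: true
```

In Rancher's Monitoring V2 this configuration is normally managed through the Receivers and Routes pages rather than edited by hand, which is why the docs link there instead of embedding YAML.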
From b7cb62db47a8b64a8b225f34f35d433f05fae734 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 14:25:30 -0400 Subject: [PATCH 30/31] fixed servicemonitors-and-podmonitors.md --- .../servicemonitors-and-podmonitors.md | 4 ++-- .../servicemonitors-and-podmonitors.md | 4 ++-- .../servicemonitors-and-podmonitors.md | 4 ++-- .../servicemonitors-and-podmonitors.md | 4 ++-- .../servicemonitors-and-podmonitors.md | 4 ++-- .../servicemonitors-and-podmonitors.md | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md b/docs/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md index 30fccb634c80..1eb42e346e49 100644 --- a/docs/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md +++ b/docs/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md @@ -18,7 +18,7 @@ This section assumes familiarity with how monitoring components work together. F ::: -### ServiceMonitors +## ServiceMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how groups of Kubernetes services should be monitored. @@ -28,7 +28,7 @@ Any Services in your cluster that match the labels located within the ServiceMon For more information about how ServiceMonitors work, refer to the [Prometheus Operator documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) -### PodMonitors +## PodMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how group of pods should be monitored. diff --git a/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md index 6e7150b11051..6b2037878108 100644 --- a/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md +++ b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md @@ -14,7 +14,7 @@ ServiceMonitors are more commonly used than PodMonitors, and we recommend them f > This section assumes familiarity with how monitoring components work together. For more information, see [this section.](../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) -### ServiceMonitors +## ServiceMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how groups of Kubernetes services should be monitored. @@ -24,7 +24,7 @@ Any Services in your cluster that match the labels located within the ServiceMon For more information about how ServiceMonitors work, refer to the [Prometheus Operator documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) -### PodMonitors +## PodMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how group of pods should be monitored. 
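Both versions of the page link out for a full example, so a stripped-down ServiceMonitor sketch is included here for orientation; the `app: example-app` label and the `metrics` port name are assumptions, not values from the patch:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app
  namespace: default
spec:
  selector:
    matchLabels:
      app: example-app      # must match the labels on the Services to scrape
  endpoints:
    - port: metrics         # named port on the Service that exposes metrics
      path: /metrics
      interval: 30s
```

This matches the behavior described above: Services whose labels satisfy `spec.selector` (within the namespaces the ServiceMonitor is allowed to select) are picked up and scraped on the named port.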
diff --git a/versioned_docs/version-2.6/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md b/versioned_docs/version-2.6/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md index 30fccb634c80..1eb42e346e49 100644 --- a/versioned_docs/version-2.6/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md +++ b/versioned_docs/version-2.6/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md @@ -18,7 +18,7 @@ This section assumes familiarity with how monitoring components work together. F ::: -### ServiceMonitors +## ServiceMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how groups of Kubernetes services should be monitored. @@ -28,7 +28,7 @@ Any Services in your cluster that match the labels located within the ServiceMon For more information about how ServiceMonitors work, refer to the [Prometheus Operator documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) -### PodMonitors +## PodMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how group of pods should be monitored. diff --git a/versioned_docs/version-2.7/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md b/versioned_docs/version-2.7/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md index 30fccb634c80..1eb42e346e49 100644 --- a/versioned_docs/version-2.7/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md +++ b/versioned_docs/version-2.7/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md @@ -18,7 +18,7 @@ This section assumes familiarity with how monitoring components work together. F ::: -### ServiceMonitors +## ServiceMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how groups of Kubernetes services should be monitored. @@ -28,7 +28,7 @@ Any Services in your cluster that match the labels located within the ServiceMon For more information about how ServiceMonitors work, refer to the [Prometheus Operator documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) -### PodMonitors +## PodMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how group of pods should be monitored. diff --git a/versioned_docs/version-2.8/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md b/versioned_docs/version-2.8/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md index 30fccb634c80..1eb42e346e49 100644 --- a/versioned_docs/version-2.8/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md +++ b/versioned_docs/version-2.8/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md @@ -18,7 +18,7 @@ This section assumes familiarity with how monitoring components work together. F ::: -### ServiceMonitors +## ServiceMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how groups of Kubernetes services should be monitored. 
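A PodMonitor follows the same pattern as the ServiceMonitor sketch above but selects Pods directly instead of going through a Service; again, the label and port name are illustrative only:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: example-app-pods
  namespace: default
spec:
  selector:
    matchLabels:
      app: example-app        # must match the labels on the target Pods
  podMetricsEndpoints:
    - port: metrics           # named container port that serves /metrics
      interval: 30s
```

This is typically the option to reach for when the Pods you want to scrape are not fronted by a Service, which is also why the pages above recommend ServiceMonitors for the common case.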
@@ -28,7 +28,7 @@ Any Services in your cluster that match the labels located within the ServiceMon For more information about how ServiceMonitors work, refer to the [Prometheus Operator documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) -### PodMonitors +## PodMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how group of pods should be monitored. diff --git a/versioned_docs/version-2.9/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md b/versioned_docs/version-2.9/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md index 30fccb634c80..1eb42e346e49 100644 --- a/versioned_docs/version-2.9/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md +++ b/versioned_docs/version-2.9/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md @@ -18,7 +18,7 @@ This section assumes familiarity with how monitoring components work together. F ::: -### ServiceMonitors +## ServiceMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how groups of Kubernetes services should be monitored. @@ -28,7 +28,7 @@ Any Services in your cluster that match the labels located within the ServiceMon For more information about how ServiceMonitors work, refer to the [Prometheus Operator documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) -### PodMonitors +## PodMonitors This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how group of pods should be monitored. From 5ea21e50e31cd78e65ed7de57d61b8035fb3a55d Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 17 Sep 2024 17:08:39 -0400 Subject: [PATCH 31/31] fixed other-troubleshooting-tips/dns.md --- .../other-troubleshooting-tips/dns.md | 20 +++++++++---------- .../other-troubleshooting-tips/dns.md | 20 +++++++++---------- .../other-troubleshooting-tips/dns.md | 20 +++++++++---------- .../other-troubleshooting-tips/dns.md | 20 +++++++++---------- .../other-troubleshooting-tips/dns.md | 20 +++++++++---------- .../other-troubleshooting-tips/dns.md | 20 +++++++++---------- .../other-troubleshooting-tips/dns.md | 20 +++++++++---------- 7 files changed, 70 insertions(+), 70 deletions(-) diff --git a/docs/troubleshooting/other-troubleshooting-tips/dns.md b/docs/troubleshooting/other-troubleshooting-tips/dns.md index af1108b6f80b..b4a6989f6b71 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/dns.md +++ b/docs/troubleshooting/other-troubleshooting-tips/dns.md @@ -12,7 +12,7 @@ Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. 
-### Check if DNS pods are running +## Check if DNS pods are running ``` kubectl -n kube-system get pods -l k8s-app=kube-dns @@ -30,7 +30,7 @@ NAME READY STATUS RESTARTS AGE kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s ``` -### Check if the DNS service is present with the correct cluster-ip +## Check if the DNS service is present with the correct cluster-ip ``` kubectl -n kube-system get svc -l k8s-app=kube-dns @@ -41,7 +41,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s ``` -### Check if domain names are resolving +## Check if domain names are resolving Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. @@ -132,15 +132,15 @@ command terminated with exit code 1 Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. -### CoreDNS specific +## CoreDNS specific -#### Check CoreDNS logging +### Check CoreDNS logging ``` kubectl -n kube-system logs -l k8s-app=kube-dns ``` -#### Check configuration +### Check configuration CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. @@ -148,7 +148,7 @@ CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} ``` -#### Check upstream nameservers in resolv.conf +### Check upstream nameservers in resolv.conf By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. @@ -156,7 +156,7 @@ By default, the configured nameservers on the host (in `/etc/resolv.conf`) will kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' ``` -#### Enable query logging +### Enable query logging Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: @@ -166,9 +166,9 @@ kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). -### kube-dns specific +## kube-dns specific -#### Check upstream nameservers in kubedns container +### Check upstream nameservers in kubedns container By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. We detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). 
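The query-logging step in the page above patches the Corefile in place with `sed`; as a reference for what the end state roughly looks like, here is a hedged sketch of the `coredns` ConfigMap with the `log` plugin enabled (the plugin list and upstream settings vary per cluster, so treat this as illustrative, not canonical):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health
        log            # query logging enabled, as described above
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
```

Once the ConfigMap changes and CoreDNS picks up the new Corefile, queries start showing up in the logs retrieved with the logging command shown earlier.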
diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md index ce867e9269fd..e328b3352966 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md @@ -12,7 +12,7 @@ Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. -### Check if DNS pods are running +## Check if DNS pods are running ``` kubectl -n kube-system get pods -l k8s-app=kube-dns @@ -30,7 +30,7 @@ NAME READY STATUS RESTARTS AGE kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s ``` -### Check if the DNS service is present with the correct cluster-ip +## Check if the DNS service is present with the correct cluster-ip ``` kubectl -n kube-system get svc -l k8s-app=kube-dns @@ -41,7 +41,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s ``` -### Check if domain names are resolving +## Check if domain names are resolving Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. @@ -132,15 +132,15 @@ command terminated with exit code 1 Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. -### CoreDNS specific +## CoreDNS specific -#### Check CoreDNS logging +### Check CoreDNS logging ``` kubectl -n kube-system logs -l k8s-app=kube-dns ``` -#### Check configuration +### Check configuration CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. @@ -148,7 +148,7 @@ CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} ``` -#### Check upstream nameservers in resolv.conf +### Check upstream nameservers in resolv.conf By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. @@ -156,7 +156,7 @@ By default, the configured nameservers on the host (in `/etc/resolv.conf`) will kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' ``` -#### Enable query logging +### Enable query logging Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: @@ -166,9 +166,9 @@ kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). 
-### kube-dns specific +## kube-dns specific -#### Check upstream nameservers in kubedns container +### Check upstream nameservers in kubedns container By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. Since Rancher v2.0.7, we detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). diff --git a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/dns.md index 255df3440166..8a289e722cd4 100644 --- a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/dns.md +++ b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/dns.md @@ -12,7 +12,7 @@ Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#de) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. -### Check if DNS pods are running +## Check if DNS pods are running ``` kubectl -n kube-system get pods -l k8s-app=kube-dns @@ -30,7 +30,7 @@ NAME READY STATUS RESTARTS AGE kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s ``` -### Check if the DNS service is present with the correct cluster-ip +## Check if the DNS service is present with the correct cluster-ip ``` kubectl -n kube-system get svc -l k8s-app=kube-dns @@ -41,7 +41,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s ``` -### Check if domain names are resolving +## Check if domain names are resolving Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. @@ -132,15 +132,15 @@ command terminated with exit code 1 Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. -### CoreDNS specific +## CoreDNS specific -#### Check CoreDNS logging +### Check CoreDNS logging ``` kubectl -n kube-system logs -l k8s-app=kube-dns ``` -#### Check configuration +### Check configuration CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. @@ -148,7 +148,7 @@ CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} ``` -#### Check upstream nameservers in resolv.conf +### Check upstream nameservers in resolv.conf By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. 
@@ -156,7 +156,7 @@ By default, the configured nameservers on the host (in `/etc/resolv.conf`) will kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' ``` -#### Enable query logging +### Enable query logging Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: @@ -166,9 +166,9 @@ kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). -### kube-dns specific +## kube-dns specific -#### Check upstream nameservers in kubedns container +### Check upstream nameservers in kubedns container By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. We detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). diff --git a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/dns.md index af1108b6f80b..b4a6989f6b71 100644 --- a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/dns.md +++ b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/dns.md @@ -12,7 +12,7 @@ Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. -### Check if DNS pods are running +## Check if DNS pods are running ``` kubectl -n kube-system get pods -l k8s-app=kube-dns @@ -30,7 +30,7 @@ NAME READY STATUS RESTARTS AGE kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s ``` -### Check if the DNS service is present with the correct cluster-ip +## Check if the DNS service is present with the correct cluster-ip ``` kubectl -n kube-system get svc -l k8s-app=kube-dns @@ -41,7 +41,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s ``` -### Check if domain names are resolving +## Check if domain names are resolving Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. @@ -132,15 +132,15 @@ command terminated with exit code 1 Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. 
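The cleanup command above removes a `dnstest` DaemonSet, but the manifest that creates it is not shown in this patch; a minimal sketch along the following lines would serve the same purpose (the image tag and keep-alive command are assumptions):

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: dnstest
spec:
  selector:
    matchLabels:
      name: dnstest
  template:
    metadata:
      labels:
        name: dnstest
    spec:
      containers:
        - name: alpine
          image: alpine:3.20
          # Keep the container alive so DNS lookups can be exec'd on every node.
          command: ["sh", "-c", "while true; do sleep 3600; done"]
```

Running it as a DaemonSet is deliberate: it puts one test Pod on every node, so per-node DNS failures (as opposed to cluster-wide ones) become visible.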
-### CoreDNS specific +## CoreDNS specific -#### Check CoreDNS logging +### Check CoreDNS logging ``` kubectl -n kube-system logs -l k8s-app=kube-dns ``` -#### Check configuration +### Check configuration CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. @@ -148,7 +148,7 @@ CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} ``` -#### Check upstream nameservers in resolv.conf +### Check upstream nameservers in resolv.conf By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. @@ -156,7 +156,7 @@ By default, the configured nameservers on the host (in `/etc/resolv.conf`) will kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' ``` -#### Enable query logging +### Enable query logging Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: @@ -166,9 +166,9 @@ kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). -### kube-dns specific +## kube-dns specific -#### Check upstream nameservers in kubedns container +### Check upstream nameservers in kubedns container By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. We detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). diff --git a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/dns.md index af1108b6f80b..b4a6989f6b71 100644 --- a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/dns.md +++ b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/dns.md @@ -12,7 +12,7 @@ Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. 
-### Check if DNS pods are running +## Check if DNS pods are running ``` kubectl -n kube-system get pods -l k8s-app=kube-dns @@ -30,7 +30,7 @@ NAME READY STATUS RESTARTS AGE kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s ``` -### Check if the DNS service is present with the correct cluster-ip +## Check if the DNS service is present with the correct cluster-ip ``` kubectl -n kube-system get svc -l k8s-app=kube-dns @@ -41,7 +41,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s ``` -### Check if domain names are resolving +## Check if domain names are resolving Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. @@ -132,15 +132,15 @@ command terminated with exit code 1 Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. -### CoreDNS specific +## CoreDNS specific -#### Check CoreDNS logging +### Check CoreDNS logging ``` kubectl -n kube-system logs -l k8s-app=kube-dns ``` -#### Check configuration +### Check configuration CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. @@ -148,7 +148,7 @@ CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} ``` -#### Check upstream nameservers in resolv.conf +### Check upstream nameservers in resolv.conf By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. @@ -156,7 +156,7 @@ By default, the configured nameservers on the host (in `/etc/resolv.conf`) will kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' ``` -#### Enable query logging +### Enable query logging Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: @@ -166,9 +166,9 @@ kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). -### kube-dns specific +## kube-dns specific -#### Check upstream nameservers in kubedns container +### Check upstream nameservers in kubedns container By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. We detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). 
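The `kubectl run --overrides` one-liner used throughout these pages can be hard to read; the equivalent Pod manifest (the Pod name is arbitrary) just sets `dnsPolicy: Default` so the container inherits the node's `/etc/resolv.conf`:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: resolv-check         # arbitrary name
spec:
  dnsPolicy: Default         # inherit the host's /etc/resolv.conf
  restartPolicy: Never
  containers:
    - name: ubuntu
      image: ubuntu
      command: ["sh", "-c", "cat /etc/resolv.conf"]
```

Apply it with `kubectl apply -f`, read the output with `kubectl logs resolv-check`, and delete the Pod afterwards; the result should match what the one-liner prints.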
diff --git a/versioned_docs/version-2.8/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.8/troubleshooting/other-troubleshooting-tips/dns.md index af1108b6f80b..b4a6989f6b71 100644 --- a/versioned_docs/version-2.8/troubleshooting/other-troubleshooting-tips/dns.md +++ b/versioned_docs/version-2.8/troubleshooting/other-troubleshooting-tips/dns.md @@ -12,7 +12,7 @@ Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. -### Check if DNS pods are running +## Check if DNS pods are running ``` kubectl -n kube-system get pods -l k8s-app=kube-dns @@ -30,7 +30,7 @@ NAME READY STATUS RESTARTS AGE kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s ``` -### Check if the DNS service is present with the correct cluster-ip +## Check if the DNS service is present with the correct cluster-ip ``` kubectl -n kube-system get svc -l k8s-app=kube-dns @@ -41,7 +41,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s ``` -### Check if domain names are resolving +## Check if domain names are resolving Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. @@ -132,15 +132,15 @@ command terminated with exit code 1 Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. -### CoreDNS specific +## CoreDNS specific -#### Check CoreDNS logging +### Check CoreDNS logging ``` kubectl -n kube-system logs -l k8s-app=kube-dns ``` -#### Check configuration +### Check configuration CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. @@ -148,7 +148,7 @@ CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} ``` -#### Check upstream nameservers in resolv.conf +### Check upstream nameservers in resolv.conf By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. @@ -156,7 +156,7 @@ By default, the configured nameservers on the host (in `/etc/resolv.conf`) will kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' ``` -#### Enable query logging +### Enable query logging Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: @@ -166,9 +166,9 @@ kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). 
-### kube-dns specific +## kube-dns specific -#### Check upstream nameservers in kubedns container +### Check upstream nameservers in kubedns container By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. We detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). diff --git a/versioned_docs/version-2.9/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.9/troubleshooting/other-troubleshooting-tips/dns.md index af1108b6f80b..b4a6989f6b71 100644 --- a/versioned_docs/version-2.9/troubleshooting/other-troubleshooting-tips/dns.md +++ b/versioned_docs/version-2.9/troubleshooting/other-troubleshooting-tips/dns.md @@ -12,7 +12,7 @@ Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. -### Check if DNS pods are running +## Check if DNS pods are running ``` kubectl -n kube-system get pods -l k8s-app=kube-dns @@ -30,7 +30,7 @@ NAME READY STATUS RESTARTS AGE kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s ``` -### Check if the DNS service is present with the correct cluster-ip +## Check if the DNS service is present with the correct cluster-ip ``` kubectl -n kube-system get svc -l k8s-app=kube-dns @@ -41,7 +41,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s ``` -### Check if domain names are resolving +## Check if domain names are resolving Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. @@ -132,15 +132,15 @@ command terminated with exit code 1 Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. -### CoreDNS specific +## CoreDNS specific -#### Check CoreDNS logging +### Check CoreDNS logging ``` kubectl -n kube-system logs -l k8s-app=kube-dns ``` -#### Check configuration +### Check configuration CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. @@ -148,7 +148,7 @@ CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} ``` -#### Check upstream nameservers in resolv.conf +### Check upstream nameservers in resolv.conf By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. 
@@ -156,7 +156,7 @@ By default, the configured nameservers on the host (in `/etc/resolv.conf`) will kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' ``` -#### Enable query logging +### Enable query logging Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: @@ -166,9 +166,9 @@ kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). -### kube-dns specific +## kube-dns specific -#### Check upstream nameservers in kubedns container +### Check upstream nameservers in kubedns container By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. We detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`).
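The kube-dns notes above cover the automatic handling of `systemd-resolved`; if you ever need to force specific upstream resolvers instead of relying on the host's `/etc/resolv.conf`, clusters that still run kube-dns accept an `upstreamNameservers` entry in the `kube-dns` ConfigMap. This is an addition not covered by the patch, and the addresses below are placeholders:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
data:
  # JSON array encoded as a string; replace with resolvers reachable from the Pods.
  upstreamNameservers: |
    ["1.1.1.1", "8.8.8.8"]
```

CoreDNS achieves the same effect differently (by adjusting the `forward` directive in the Corefile), so this ConfigMap key only applies to the kube-dns sections of these pages.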