mirror of
https://github.com/rancher/rancher-docs.git
synced 2026-05-16 10:03:28 +00:00
Merge pull request #4004 from rancher/revert-4002-staging
Revert "Staging to master" due to rendering problems
This commit is contained in:
+1
-1
@@ -10,4 +10,4 @@ RUN mkdir -p /output /theme/rancher-website-theme && tar -xzf /run/master.tar.gz
|
||||
# Expose default hugo port
|
||||
EXPOSE 9001
|
||||
|
||||
ENTRYPOINT ["hugo", "serve", "--bind=0.0.0.0", "--buildDrafts", "--buildFuture" ]
|
||||
ENTRYPOINT ["hugo", "serve", "--bind=0.0.0.0", "--buildDrafts", "--buildFuture", "--baseURL=" ]
|
||||
|
||||
+1
-1
@@ -16,7 +16,7 @@ COPY .git .git
|
||||
ADD https://github.com/rancherlabs/website-theme/archive/master.tar.gz /run/master.tar.gz
|
||||
RUN mkdir -p /output /theme/rancher-website-theme && tar -xzf /run/master.tar.gz -C /run/node_modules/rancher-website-theme --strip=1 && rm /run/master.tar.gz
|
||||
|
||||
RUN ["hugo", "--buildFuture", "--destination=/output"]
|
||||
RUN ["hugo", "--buildFuture", "--baseURL=https://rancher.com/docs", "--destination=/output"]
|
||||
|
||||
# Make sure something got built
|
||||
RUN stat /output/index.html
|
||||
|
||||
+1
-1
@@ -16,7 +16,7 @@ COPY .git .git
|
||||
ADD https://github.com/rancherlabs/website-theme/archive/master.tar.gz /run/master.tar.gz
|
||||
RUN mkdir -p /output /theme/rancher-website-theme && tar -xzf /run/master.tar.gz -C /run/node_modules/rancher-website-theme --strip=1 && rm /run/master.tar.gz
|
||||
|
||||
RUN ["hugo", "--buildDrafts", "--buildFuture", "--destination=/output"]
|
||||
RUN ["hugo", "--buildDrafts", "--buildFuture", "--baseURL=https://staging.rancher.com/docs", "--destination=/output"]
|
||||
|
||||
# Make sure something got built
|
||||
RUN stat /output/index.html
|
||||
|
||||
+1
-1
@@ -1,4 +1,4 @@
|
||||
baseURL = ".Permalink"
|
||||
baseURL = ""
|
||||
languageCode = "en-us"
|
||||
title = "Rancher Labs"
|
||||
|
||||
|
||||
@@ -9,7 +9,6 @@ If a pod does not meet the conditions specified in the PSP, Kubernetes will not
|
||||
|
||||
- [How PSPs Work](#how-psps-work)
|
||||
- [Default PSPs](#default-psps)
|
||||
- [Restricted-NoRoot](#restricted-noroot)
|
||||
- [Restricted](#restricted)
|
||||
- [Unrestricted](#unrestricted)
|
||||
- [Creating PSPs](#creating-psps)
|
||||
@@ -29,22 +28,18 @@ PSPs work through inheritance:
|
||||
|
||||
Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked if it complies with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP.
|
||||
|
||||
Read more about Pod Security Policies in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/).
|
||||
Read more about Pod Security Policies in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/).
|
||||
|
||||
# Default PSPs
|
||||
|
||||
Rancher ships with three default Pod Security Policies (PSPs): the `restricted-noroot`, `restricted` and `unrestricted` policies.
|
||||
Rancher ships with two default Pod Security Policies (PSPs): the `restricted` and `unrestricted` policies.
|
||||
|
||||
### Restricted-NoRoot
|
||||
### Restricted
|
||||
|
||||
This policy is based on the Kubernetes [example restricted policy](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/restricted-psp.yaml). It significantly restricts what types of pods can be deployed to a cluster or project. This policy:
|
||||
|
||||
- Prevents pods from running as a privileged user and prevents escalation of privileges.
|
||||
- Validates that server-required security mechanisms are in place, such as restricting what volumes can be mounted to only the core volume types and preventing root supplemental groups from being added.
|
||||
|
||||
### Restricted
|
||||
|
||||
This policy is a relaxed version of the `restricted-noroot` policy, with almost all the restrictions in place, except for the fact that it allows running containers as a privileged user.
|
||||
- Validates that server-required security mechanisms are in place (such as restricting what volumes can be mounted to only the core volume types and preventing root supplemental groups from being added).
|
||||
|
||||
### Unrestricted
|
||||
|
||||
@@ -56,9 +51,9 @@ Using Rancher, you can create a Pod Security Policy using our GUI rather than cr
|
||||
|
||||
### Requirements
|
||||
|
||||
Rancher can only assign PSPs for clusters that are [launched using RKE]({{< baseurl >}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/).
|
||||
Rancher can only assign PSPs for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/)
|
||||
|
||||
You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster]({{<baseurl>}}/rancher/v2.6/en/cluster-admin/editing-clusters/).
|
||||
You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster.]({{<baseurl>}}/rancher/v2.6/en/cluster-admin/editing-clusters/)
|
||||
|
||||
It is a best practice to set PSP at the cluster level.
|
||||
|
||||
@@ -68,11 +63,19 @@ We recommend adding PSPs during cluster and project creation instead of adding i
|
||||
|
||||
1. In the upper left corner, click **☰ > Cluster Management**.
|
||||
1. In the left navigation bar, click **Pod Security Policies**.
|
||||
1. Click **Add Policy**.
|
||||
1. Click **Add policy**.
|
||||
1. Name the policy.
|
||||
1. Complete each section of the form. Refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for more information on what each policy does.
|
||||
1. Click **Create**.
|
||||
|
||||
# Configuration
|
||||
|
||||
The Kubernetes documentation on PSPs is [here](https://kubernetes.io/docs/concepts/policy/pod-security-policy/).
|
||||
The Kubernetes documentation on PSPs is [here.](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)
|
||||
|
||||
|
||||
|
||||
<!-- links -->
|
||||
|
||||
[1]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#volumes-and-file-systems
|
||||
[2]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces
|
||||
[3]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups
|
||||
|
||||
@@ -5,7 +5,7 @@ weight: 1127
|
||||
|
||||
Cluster and project roles define user authorization inside a cluster or project.
|
||||
|
||||
To manage these roles,
|
||||
To manage these roles,
|
||||
|
||||
1. Click **☰ > Users & Authentication**.
|
||||
1. In the left navigation bar, click **Roles** and go to the **Cluster** or **Project/Namespaces** tab.
|
||||
@@ -60,8 +60,8 @@ The following table lists the permissions available for the `Manage Nodes` role
|
||||
| SSH Access | ✓ | ✓ |
|
||||
| Delete Nodes | ✓ | ✓ |
|
||||
| Scale Clusters Up and Down | ✓ | * |
|
||||
***In RKE2, you must have permission to edit a cluster to be able to scale clusters up and down.**
|
||||
<br />
|
||||
***In RKE2, you must have permission to edit a cluster to be able to scale clusters up and down.**
|
||||
<br />
|
||||
|
||||
For details on how each cluster role can access Kubernetes resources, you can look them up in the Rancher UI:
|
||||
|
||||
@@ -81,24 +81,12 @@ To assign a custom role to a new cluster member, you can use the Rancher UI. To
|
||||
|
||||
To assign the role to a new cluster member,
|
||||
|
||||
{{% tabs %}}
|
||||
{{% tab "Rancher before v2.6.4" %}}
|
||||
1. Click **☰ > Cluster Management**.
|
||||
1. Go to the cluster where you want to assign a role to a member and click **Explore**.
|
||||
1. Click **RBAC > Cluster Members**.
|
||||
1. Click **Add**.
|
||||
1. In the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member.
|
||||
1. Click **Create**.
|
||||
{{% /tab %}}
|
||||
{{% tab "Rancher v2.6.4+" %}}
|
||||
1. Click **☰ > Cluster Management**.
|
||||
1. Go to the cluster where you want to assign a role to a member and click **Explore**.
|
||||
1. Click **Cluster > Cluster Members**.
|
||||
1. Click **Add**.
|
||||
1. In the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member.
|
||||
1. Click **Create**.
|
||||
{{% /tab %}}
|
||||
{{% /tabs %}}
|
||||
|
||||
**Result:** The member has the assigned role.
|
||||
|
||||
|
||||
@@ -5,22 +5,7 @@ weight: 24
|
||||
|
||||
## How to use the API
|
||||
|
||||
The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it:
|
||||
|
||||
{{% tabs %}}
|
||||
{{% tab "Rancher v2.6.4+" %}}
|
||||
|
||||
1. Click on your user avatar in the upper right corner.
|
||||
1. Click **Account & API Keys**.
|
||||
1. Under the **API Keys** section, find the **API Endpoint** field and click the link. The link will look something like `https://<RANCHER_FQDN>/v3`, where `<RANCHER_FQDN>` is the fully qualified domain name of your Rancher deployment.
|
||||
|
||||
{{% /tab %}}
|
||||
{{% tab "Rancher before v2.6.4" %}}
|
||||
|
||||
Go to the URL endpoint at `https://<RANCHER_FQDN>/v3`, where `<RANCHER_FQDN>` is the fully qualified domain name of your Rancher deployment.
|
||||
|
||||
{{% /tab %}}
|
||||
{{% /tabs %}}
|
||||
The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint (`https://<rancher_fqdn>/v3`) as well as create [API keys]({{<baseurl>}}/rancher/v2.6/en/user-settings/api-keys/).
|
||||
|
||||
## Authentication
|
||||
|
||||
|
||||
@@ -5,15 +5,6 @@ weight: 2
|
||||
|
||||
A restore is performed by creating a Restore custom resource.
|
||||
|
||||
---
|
||||
**Caution:** Before performing a restore or rollback, note the following:
|
||||
|
||||
- In Rancher v2.6.4, the cluster-api module has been upgraded from v0.4.4 to v1.0.2 in which the apiVersion of CAPI CRDs are upgraded from `cluster.x-k8s.io/v1alpha4` to `cluster.x-k8s.io/v1beta1`. This has the effect of causing rollbacks from Rancher v2.6.4 to any previous version of Rancher v2.6.x to fail because the previous version the CRDs needed to roll back are no longer available in v1beta1.
|
||||
|
||||
- To avoid this, the Rancher resource cleanup scripts should be run **before** the restore or rollback is attempted. Specifically, two scripts have been created to assist you: one to clean up the cluster (`cleanup.sh`), and one to check for any Rancher-related resources in the cluster (`verify.sh`). Details on the cleanup script can be found in the [rancherlabs/support-tools repo](https://github.com/rancherlabs/support-tools/tree/master/cleanup-rancher-k8s-resources).
|
||||
|
||||
---
|
||||
|
||||
> **Important**
|
||||
>
|
||||
> * Follow the instructions from this page for restoring rancher on the same cluster where it was backed up from. In order to migrate rancher to a new cluster, follow the steps to [migrate rancher.]({{<baseurl>}}/rancher/v2.6/en/backups/migrating-rancher)
|
||||
|
||||
@@ -109,12 +109,6 @@ Calico also provides a stateless IP-in-IP or VXLAN encapsulation mode that can b
|
||||
|
||||
Kubernetes workers should open TCP port `179` if using BGP or UDP port `4789` if using VXLAN encapsulation. In addition, TCP port `5473` is needed when using Typha. See [the port requirements for user clusters]({{<baseurl>}}/rancher/v2.6/en/cluster-provisioning/node-requirements/#networking-requirements) for more details.
|
||||
|
||||
>**Important:** In Rancher v2.6.3, Calico probes fail on Windows nodes upon RKE2 installation. <b>Note that this issue is resolved in v2.6.4.</b>
|
||||
>
|
||||
>- To work around this issue, first navigate to `https://<rancherserverurl>/v3/settings/windows-rke2-install-script`.
|
||||
>
|
||||
>- There, change the current setting: `https://raw.githubusercontent.com/rancher/wins/v0.1.3/install.ps1` to this new setting: `https://raw.githubusercontent.com/rancher/rke2/master/windows/rke2-install.ps1`.
|
||||
|
||||

|
||||
|
||||
For more information, see the following pages:
|
||||
|
||||
@@ -74,8 +74,7 @@ To add a private CA for Helm Chart repositories:
|
||||
[...]
|
||||
```
|
||||
|
||||
|
||||
- **Git-based chart repositories**: It is not currently possible to add a private CA. For git-based chart repositories with a certificate signed by a private CA, you must disable TLS verification. Click **Edit YAML** for the chart repo and add the key/value pair as follows:
|
||||
- **Git-based chart repositories**: You must add a base64 encoded copy of the CA certificate in DER format to the spec.caBundle field of the chart repo, such as `openssl x509 -outform der -in ca.pem | base64 -w0`. Click **Edit YAML** for the chart repo and set, as in the following example:</br>
|
||||
```
|
||||
[...]
|
||||
spec:
|
||||
|
||||
@@ -104,10 +104,6 @@ There are three recommended options for the source of the certificate used for T
|
||||
|
||||
### 4. Install cert-manager
|
||||
|
||||
**Note:** New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v1.6.x will reach end-of-life on March 30, 2022.
|
||||
|
||||
> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer]({{<baseurl>}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination).
|
||||
|
||||
> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer]({{<baseurl>}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination).
|
||||
|
||||
This step is only required to use certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) or to request Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`).
|
||||
@@ -120,7 +116,7 @@ These instructions are adapted from the [official cert-manager documentation](ht
|
||||
|
||||
```
|
||||
# If you have installed the CRDs manually instead of with the `--set installCRDs=true` option added to your Helm install command, you should upgrade your CRD resources before upgrading the Helm chart:
|
||||
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.7.1/cert-manager.crds.yaml
|
||||
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.crds.yaml
|
||||
|
||||
# Add the Jetstack Helm repository
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
@@ -132,7 +128,7 @@ helm repo update
|
||||
helm install cert-manager jetstack/cert-manager \
|
||||
--namespace cert-manager \
|
||||
--create-namespace \
|
||||
--version v1.7.1
|
||||
--version v1.5.1
|
||||
```
|
||||
|
||||
Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods:
|
||||
|
||||
@@ -148,13 +148,7 @@ helm upgrade rancher rancher-<CHART_REPO>/rancher \
|
||||
|
||||
### Option B: Reinstalling Rancher and cert-manager
|
||||
|
||||
If you are currently running the cert-manager whose version is 1.5 or below, and want to upgrade both Rancher and cert-manager to a new version (1.6+ in the case of cert-manager), then you need to re-install both Rancher and cert-manager due to the API change in cert-manager 1.6. This will also be necessary if you are upgrading from a version of cert manager below 0.11 to a version of cert-manager above 0.11.
|
||||
|
||||
>**Important:**
|
||||
>
|
||||
>- New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v1.6.x will reach end-of-life on March 30, 2022.
|
||||
>
|
||||
>- Note that if you are below version 1.5 and want to go to 1.7, you should first upgrade to 1.6 as an intermediate step. Follow the cert-manager docs [here](https://cert-manager.io/docs/installation/upgrading/upgrading-1.5-1.6/) to do a 1.5 to 1.6 upgrade, and [here](https://cert-manager.io/docs/installation/upgrading/upgrading-1.6-1.7/) to then do a 1.6 to 1.7 upgrade. For more details on upgrading cert-manager, refer to our [documentation]({{<baseurl>}}/rancher/v2.6/en/installation/resources/upgrading-cert-manager).
|
||||
If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manager due to the API change in cert-manager v0.11.
|
||||
|
||||
1. Uninstall Rancher
|
||||
|
||||
|
||||
+3
-5
@@ -94,10 +94,8 @@ helm repo update
|
||||
|
||||
Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager).
|
||||
|
||||
**Note:** New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v1.6.x will reach end-of-life on March 30, 2022.
|
||||
|
||||
```plain
|
||||
helm fetch jetstack/cert-manager --version v1.7.1
|
||||
helm fetch jetstack/cert-manager --version v1.5.1
|
||||
```
|
||||
|
||||
### 3. Render the cert-manager template
|
||||
@@ -105,7 +103,7 @@ helm fetch jetstack/cert-manager --version v1.7.1
|
||||
Render the cert-manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files.
|
||||
|
||||
```plain
|
||||
helm template cert-manager ./cert-manager-v1.7.1.tgz --output-dir . \
|
||||
helm template cert-manager ./cert-manager-v1.5.1.tgz --output-dir . \
|
||||
--namespace cert-manager \
|
||||
--set image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-controller \
|
||||
--set webhook.image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-webhook \
|
||||
@@ -117,7 +115,7 @@ helm template cert-manager ./cert-manager-v1.7.1.tgz --output-dir . \
|
||||
|
||||
Download the required CRD file for cert-manager:
|
||||
```plain
|
||||
curl -L -o cert-manager/cert-manager-crd.yaml https://github.com/jetstack/cert-manager/releases/download/v1.7.1/cert-manager.crds.yaml
|
||||
curl -L -o cert-manager/cert-manager-crd.yaml https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.crds.yaml
|
||||
```
|
||||
|
||||
### 5. Render the Rancher template
|
||||
|
||||
+1
-3
@@ -51,8 +51,6 @@ If you will use ARM64 hosts, the registry must support manifests. As of April 20
|
||||
|
||||
In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well.
|
||||
|
||||
**Note:** New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v1.6.x will reach end-of-life on March 30, 2022.
|
||||
|
||||
1. Fetch the latest `cert-manager` Helm chart and parse the template for image details:
|
||||
|
||||
> **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{<baseurl>}}/rancher/v2.6/en/installation/resources/upgrading-cert-manager/).
|
||||
@@ -60,7 +58,7 @@ In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS
|
||||
```plain
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
helm repo update
|
||||
helm fetch jetstack/cert-manager --version v1.7.1
|
||||
helm fetch jetstack/cert-manager --version v1.5.1
|
||||
helm template ./cert-manager-<version>.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt
|
||||
```
|
||||
|
||||
|
||||
+2
-4
@@ -23,17 +23,15 @@ kubectl create namespace cert-manager
|
||||
|
||||
Install the CustomResourceDefinitions of cert-manager:
|
||||
|
||||
**Note:** New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v1.6.x will reach end-of-life on March 30, 2022.
|
||||
|
||||
```
|
||||
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.7.1/cert-manager.crds.yaml
|
||||
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.crds.yaml
|
||||
```
|
||||
|
||||
And install it with Helm. Note that cert-manager also needs your proxy configured in case it needs to communicate with Let's Encrypt or other external certificate issuers:
|
||||
|
||||
```
|
||||
helm upgrade --install cert-manager jetstack/cert-manager \
|
||||
--namespace cert-manager --version v1.7.1 \
|
||||
--namespace cert-manager --version v1.5.1 \
|
||||
--set http_proxy=http://${proxy_host} \
|
||||
--set https_proxy=http://${proxy_host} \
|
||||
--set no_proxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local
|
||||
|
||||
@@ -15,7 +15,7 @@ To address these changes, this guide will do two things:
|
||||
1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data
|
||||
|
||||
> **Important:**
|
||||
> If you are currently running the cert-manager whose version is 1.5 or below, and want to upgrade both Rancher and cert-manager to a new version (1.6+ in the case of cert-manager), then you need to re-install both Rancher and cert-manager due to the API change in cert-manager 1.6. This will also be necessary if you are upgrading from a version of cert manager below 0.11 to a version of cert-manager above 0.11. Follow the steps below:
|
||||
> If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them:
|
||||
|
||||
> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server
|
||||
> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager
|
||||
@@ -220,13 +220,6 @@ cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m
|
||||
|
||||
## Cert-Manager API change and data migration
|
||||
|
||||
---
|
||||
_New in v2.6.4_
|
||||
|
||||
Rancher now supports cert-manager versions 1.6.2 and 1.7.1. We recommend v1.7.x because v1.6.x will reach end-of-life on March 30, 2022. To read more, see the [cert-manager docs]({{<baseurl>}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#4-install-cert-manager). For instructions on upgrading cert-manager from version 1.5 to 1.6, see the upstream cert-manager documentation [here](https://cert-manager.io/docs/installation/upgrading/upgrading-1.5-1.6/). For instructions on upgrading cert-manager from version 1.6 to 1.7, see the upstream cert-manager documentation [here](https://cert-manager.io/docs/installation/upgrading/upgrading-1.6-1.7/).
|
||||
|
||||
---
|
||||
|
||||
Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release.
|
||||
|
||||
Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format.
|
||||
|
||||
@@ -11,7 +11,7 @@ weight: 20
|
||||
</td>
|
||||
<td width="30%" style="border: none;">
|
||||
<h4>Reporting process</h4>
|
||||
<p style="padding: 8px">Please submit possible security issues by emailing <a href="mailto:security-rancher@suse.com">security-rancher@suse.com</a> .</p>
|
||||
<p style="padding: 8px">Please submit possible security issues by emailing <a href="mailto:security@rancher.com">security@rancher.com</a> .</p>
|
||||
</td>
|
||||
<td width="30%" style="border: none;">
|
||||
<h4>Announcements</h4>
|
||||
|
||||
-14832
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
Load Diff
-8259
File diff suppressed because one or more lines are too long
@@ -1,641 +0,0 @@
|
||||
---
|
||||
title: Hardening Guide with CIS v1.6 Benchmark
|
||||
weight: 100
|
||||
---
|
||||
|
||||
This document provides prescriptive guidance for hardening a production installation of a RKE cluster to be used with Rancher v2.6.3. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS).
|
||||
|
||||
> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes.
|
||||
|
||||
This hardening guide is intended to be used for RKE clusters and associated with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher:
|
||||
|
||||
| Rancher Version | CIS Benchmark Version | Kubernetes Version |
|
||||
| --- | --- | --- |
|
||||
| Rancher v2.6.3 | Benchmark v1.6 | Kubernetes v1.18, v1.19, v1.20 and v1.21 |
|
||||
|
||||
[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.6/Rancher_v2-6_CIS_v1-6_Hardening_Guide.pdf).
|
||||
|
||||
- [Overview](#overview)
|
||||
- [Configure Kernel Runtime Parameters](#configure-kernel-runtime-parameters)
|
||||
- [Configure `etcd` user and group](#configure-etcd-user-and-group)
|
||||
- [Configure `default` service account](#configure-default-service-account)
|
||||
- [Configure Network Policy](#configure-network-policy)
|
||||
- [Reference Hardened RKE `cluster.yml` Configuration](#reference-hardened-rke-cluster-yml-configuration)
|
||||
- [Reference Hardened RKE Template Configuration](#reference-hardened-rke-template-configuration)
|
||||
- [Reference Hardened **cloud-config** Configuration](#reference-hardened-cloud-config-configuration)
|
||||
|
||||
### Overview
|
||||
|
||||
This document provides prescriptive guidance for hardening a RKE cluster to be used for installing Rancher v2.6.3 with Kubernetes v1.18 up to v1.21 or provisioning a RKE cluster with Kubernetes v1.18 up to v.21 to be used within Rancher v2.6.3. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Information Security (CIS).
|
||||
|
||||
For more details about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS 1.6 Benchmark - Self-Assessment Guide - Rancher v2.6]({{<baseurl>}}/rancher/v2.6/en/security/hardening-guides/1.6-benchmark-2.6/).
|
||||
|
||||
#### Known Issues
|
||||
|
||||
- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS v1.6 hardened setup when only public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes.
|
||||
- When setting the `default_pod_security_policy_template_id:` to `restricted` or `restricted-noroot`, based on the pod security policies (PSP) [provided]({{<baseurl>}}/rancher/v2.6/en/admin-settings/pod-security-policies/) by Rancher, Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS v1.6 check 5.1.5 requires that the default service accounts have no roles or cluster roles bound to it apart from the defaults. In addition the default service accounts should be configured such that it does not provide a service account token and does not have any explicit rights assignments.
|
||||
|
||||
### Configure Kernel Runtime Parameters
|
||||
|
||||
The following `sysctl` configuration is recommended for all nodes type in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`:
|
||||
|
||||
```ini
|
||||
vm.overcommit_memory=1
|
||||
vm.panic_on_oom=0
|
||||
kernel.panic=10
|
||||
kernel.panic_on_oops=1
|
||||
kernel.keys.root_maxbytes=25000000
|
||||
```
|
||||
|
||||
Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings.
|
||||
|
||||
### Configure `etcd` user and group
|
||||
|
||||
A user account and group for the **etcd** service is required to be setup before installing RKE. The **uid** and **gid** for the **etcd** user will be used in the RKE **config.yml** to set the proper permissions for files and directories during installation time.
|
||||
|
||||
#### Create `etcd` user and group
|
||||
|
||||
To create the **etcd** user and group run the following console commands. The commands below use `52034` for **uid** and **gid** are for example purposes. Any valid unused **uid** or **gid** could also be used in lieu of `52034`.
|
||||
|
||||
```bash
|
||||
groupadd --gid 52034 etcd
|
||||
useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd --shell /usr/sbin/nologin
|
||||
```
|
||||
|
||||
Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
etcd:
|
||||
gid: 52034
|
||||
uid: 52034
|
||||
```
|
||||
|
||||
### Configure `default` Service Account
|
||||
|
||||
#### Set `automountServiceAccountToken` to `false` for `default` service accounts
|
||||
|
||||
Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.
|
||||
|
||||
For each namespace including **default** and **kube-system** on a standard RKE install, the **default** service account must include this value:
|
||||
|
||||
```yaml
|
||||
automountServiceAccountToken: false
|
||||
```
|
||||
|
||||
Save the following configuration to a file called `account_update.yaml`.
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: default
|
||||
automountServiceAccountToken: false
|
||||
```
|
||||
|
||||
Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions.
|
||||
|
||||
```bash
|
||||
#!/bin/bash -e
|
||||
|
||||
for namespace in $(kubectl get namespaces -A -o=jsonpath="{.items[*]['metadata.name']}"); do
|
||||
kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)"
|
||||
done
|
||||
```
|
||||
|
||||
### Configure Network Policy
|
||||
|
||||
#### Ensure that all Namespaces have Network Policies defined
|
||||
|
||||
Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.
|
||||
|
||||
Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled. This guide uses [Canal](https://github.com/projectcalico/canal) to provide the policy enforcement. Additional information about CNI providers can be found [here](https://www.suse.com/c/rancher_blog/comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/).
|
||||
|
||||
Once a CNI provider is enabled on a cluster a default network policy can be applied. For reference purposes a **permissive** example is provided below. If you want to allow all traffic to all pods in a namespace (even if policies are added that cause some pods to be treated as “isolated”), you can create a policy that explicitly allows all traffic in that namespace. Save the following configuration as `default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) about network policies can be found on the Kubernetes site.
|
||||
|
||||
> This `NetworkPolicy` is just an example and is not recommended for production use.
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: default-allow-all
|
||||
spec:
|
||||
podSelector: {}
|
||||
ingress:
|
||||
- {}
|
||||
egress:
|
||||
- {}
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
```
|
||||
|
||||
Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to `chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions.
|
||||
|
||||
```bash
|
||||
#!/bin/bash -e
|
||||
|
||||
for namespace in $(kubectl get namespaces -A -o=jsonpath="{.items[*]['metadata.name']}"); do
|
||||
kubectl apply -f default-allow-all.yaml -n ${namespace}
|
||||
done
|
||||
```
|
||||
|
||||
Execute this script to apply the `default-allow-all.yaml` configuration with the **permissive** `NetworkPolicy` to all namespaces.
|
||||
|
||||
### Reference Hardened RKE `cluster.yml` Configuration
|
||||
|
||||
The reference `cluster.yml` is used by the RKE CLI that provides the configuration needed to achieve a hardened install of Rancher Kubernetes Engine (RKE). RKE install [documentation]({{<baseurl>}}/rke/latest/en/installation/) is provided with additional details about the configuration items. This reference `cluster.yml` does not include the required **nodes** directive which will vary depending on your environment. Documentation for node configuration in RKE can be found [here]({{<baseurl>}}/rke/latest/en/config-options/nodes/).
|
||||
|
||||
> For a Kubernetes v1.18 cluster, the configuration `spec.volumes: 'ephemeral'` should be removed from the `PodSecurityPolicy`, since it's not supported in this Kubernetes release.
|
||||
|
||||
```yaml
|
||||
# If you intend to deploy Kubernetes in an air-gapped environment,
|
||||
# please consult the documentation on how to configure custom RKE images.
|
||||
# https://rancher.com/docs/rke/latest/en/installation/ .
|
||||
|
||||
# The nodes directive is required and will vary depending on your environment.
|
||||
# Documentation for node configuration can be found here:
|
||||
# https://rancher.com/docs/rke/latest/en/config-options/nodes/
|
||||
nodes: []
|
||||
services:
|
||||
etcd:
|
||||
image: ""
|
||||
extra_args: {}
|
||||
extra_binds: []
|
||||
extra_env: []
|
||||
win_extra_args: {}
|
||||
win_extra_binds: []
|
||||
win_extra_env: []
|
||||
external_urls: []
|
||||
ca_cert: ""
|
||||
cert: ""
|
||||
key: ""
|
||||
path: ""
|
||||
uid: 52034
|
||||
gid: 52034
|
||||
snapshot: false
|
||||
retention: ""
|
||||
creation: ""
|
||||
backup_config: null
|
||||
kube-api:
|
||||
image: ""
|
||||
extra_args: {}
|
||||
extra_binds: []
|
||||
extra_env: []
|
||||
win_extra_args: {}
|
||||
win_extra_binds: []
|
||||
win_extra_env: []
|
||||
service_cluster_ip_range: ""
|
||||
service_node_port_range: ""
|
||||
pod_security_policy: true
|
||||
always_pull_images: false
|
||||
secrets_encryption_config:
|
||||
enabled: true
|
||||
custom_config: null
|
||||
audit_log:
|
||||
enabled: true
|
||||
configuration: null
|
||||
admission_configuration: null
|
||||
event_rate_limit:
|
||||
enabled: true
|
||||
configuration: null
|
||||
kube-controller:
|
||||
image: ""
|
||||
extra_args:
|
||||
feature-gates: RotateKubeletServerCertificate=true
|
||||
tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||
bind-address: 127.0.0.1
|
||||
extra_binds: []
|
||||
extra_env: []
|
||||
win_extra_args: {}
|
||||
win_extra_binds: []
|
||||
win_extra_env: []
|
||||
cluster_cidr: ""
|
||||
service_cluster_ip_range: ""
|
||||
scheduler:
|
||||
image: ""
|
||||
extra_args:
|
||||
tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||
bind-address: 127.0.0.1
|
||||
extra_binds: []
|
||||
extra_env: []
|
||||
win_extra_args: {}
|
||||
win_extra_binds: []
|
||||
win_extra_env: []
|
||||
kubelet:
|
||||
image: ""
|
||||
extra_args:
|
||||
feature-gates: RotateKubeletServerCertificate=true
|
||||
protect-kernel-defaults: true
|
||||
tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||
extra_binds: []
|
||||
extra_env: []
|
||||
win_extra_args: {}
|
||||
win_extra_binds: []
|
||||
win_extra_env: []
|
||||
cluster_domain: cluster.local
|
||||
infra_container_image: ""
|
||||
cluster_dns_server: ""
|
||||
fail_swap_on: false
|
||||
generate_serving_certificate: true
|
||||
kubeproxy:
|
||||
image: ""
|
||||
extra_args: {}
|
||||
extra_binds: []
|
||||
extra_env: []
|
||||
win_extra_args: {}
|
||||
win_extra_binds: []
|
||||
win_extra_env: []
|
||||
network:
|
||||
plugin: ""
|
||||
options: {}
|
||||
mtu: 0
|
||||
node_selector: {}
|
||||
update_strategy: null
|
||||
authentication:
|
||||
strategy: ""
|
||||
sans: []
|
||||
webhook: null
|
||||
addons: |
|
||||
# Upstream Kubernetes restricted PSP policy
|
||||
# https://github.com/kubernetes/website/blob/564baf15c102412522e9c8fc6ef2b5ff5b6e766c/content/en/examples/policy/restricted-psp.yaml
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: restricted-noroot
|
||||
spec:
|
||||
privileged: false
|
||||
# Required to prevent escalations to root.
|
||||
allowPrivilegeEscalation: false
|
||||
requiredDropCapabilities:
|
||||
- ALL
|
||||
# Allow core volume types.
|
||||
volumes:
|
||||
- 'configMap'
|
||||
- 'emptyDir'
|
||||
- 'projected'
|
||||
- 'secret'
|
||||
- 'downwardAPI'
|
||||
# Assume that ephemeral CSI drivers & persistentVolumes set up by the cluster admin are safe to use.
|
||||
- 'csi'
|
||||
- 'persistentVolumeClaim'
|
||||
- 'ephemeral'
|
||||
hostNetwork: false
|
||||
hostIPC: false
|
||||
hostPID: false
|
||||
runAsUser:
|
||||
# Require the container to run without root privileges.
|
||||
rule: 'MustRunAsNonRoot'
|
||||
seLinux:
|
||||
# This policy assumes the nodes are using AppArmor rather than SELinux.
|
||||
rule: 'RunAsAny'
|
||||
supplementalGroups:
|
||||
rule: 'MustRunAs'
|
||||
ranges:
|
||||
# Forbid adding the root group.
|
||||
- min: 1
|
||||
max: 65535
|
||||
fsGroup:
|
||||
rule: 'MustRunAs'
|
||||
ranges:
|
||||
# Forbid adding the root group.
|
||||
- min: 1
|
||||
max: 65535
|
||||
readOnlyRootFilesystem: false
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: psp:restricted-noroot
|
||||
rules:
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resourceNames:
|
||||
- restricted-noroot
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
verbs:
|
||||
- use
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: psp:restricted-noroot
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: psp:restricted-noroot
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: Group
|
||||
name: system:serviceaccounts
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: Group
|
||||
name: system:authenticated
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: default-allow-all
|
||||
spec:
|
||||
podSelector: {}
|
||||
ingress:
|
||||
- {}
|
||||
egress:
|
||||
- {}
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: default
|
||||
automountServiceAccountToken: false
|
||||
addons_include: []
|
||||
system_images:
|
||||
etcd: ""
|
||||
alpine: ""
|
||||
nginx_proxy: ""
|
||||
cert_downloader: ""
|
||||
kubernetes_services_sidecar: ""
|
||||
kubedns: ""
|
||||
dnsmasq: ""
|
||||
kubedns_sidecar: ""
|
||||
kubedns_autoscaler: ""
|
||||
coredns: ""
|
||||
coredns_autoscaler: ""
|
||||
nodelocal: ""
|
||||
kubernetes: ""
|
||||
flannel: ""
|
||||
flannel_cni: ""
|
||||
calico_node: ""
|
||||
calico_cni: ""
|
||||
calico_controllers: ""
|
||||
calico_ctl: ""
|
||||
calico_flexvol: ""
|
||||
canal_node: ""
|
||||
canal_cni: ""
|
||||
canal_controllers: ""
|
||||
canal_flannel: ""
|
||||
canal_flexvol: ""
|
||||
weave_node: ""
|
||||
weave_cni: ""
|
||||
pod_infra_container: ""
|
||||
ingress: ""
|
||||
ingress_backend: ""
|
||||
metrics_server: ""
|
||||
windows_pod_infra_container: ""
|
||||
ssh_key_path: ""
|
||||
ssh_cert_path: ""
|
||||
ssh_agent_auth: false
|
||||
authorization:
|
||||
mode: ""
|
||||
options: {}
|
||||
ignore_docker_version: false
|
||||
kubernetes_version: ""
|
||||
private_registries: []
|
||||
ingress:
|
||||
provider: ""
|
||||
options: {}
|
||||
node_selector: {}
|
||||
extra_args: {}
|
||||
dns_policy: ""
|
||||
extra_envs: []
|
||||
extra_volumes: []
|
||||
extra_volume_mounts: []
|
||||
update_strategy: null
|
||||
http_port: 0
|
||||
https_port: 0
|
||||
network_mode: ""
|
||||
cluster_name:
|
||||
cloud_provider:
|
||||
name: ""
|
||||
prefix_path: ""
|
||||
win_prefix_path: ""
|
||||
addon_job_timeout: 0
|
||||
bastion_host:
|
||||
address: ""
|
||||
port: ""
|
||||
user: ""
|
||||
ssh_key: ""
|
||||
ssh_key_path: ""
|
||||
ssh_cert: ""
|
||||
ssh_cert_path: ""
|
||||
monitoring:
|
||||
provider: ""
|
||||
options: {}
|
||||
node_selector: {}
|
||||
update_strategy: null
|
||||
replicas: null
|
||||
restore:
|
||||
restore: false
|
||||
snapshot_name: ""
|
||||
dns: null
|
||||
upgrade_strategy:
|
||||
max_unavailable_worker: ""
|
||||
max_unavailable_controlplane: ""
|
||||
drain: null
|
||||
node_drain_input: null
|
||||
```
|
||||
|
||||
### Reference Hardened RKE Template Configuration
|
||||
|
||||
The reference RKE template provides the configuration needed to achieve a hardened install of Kubernetes. RKE templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher [documentation]({{<baseurl>}}/rancher/v2.6/en/installation) for additional installation and RKE template details.
|
||||
|
||||
```yaml
|
||||
#
|
||||
# Cluster Config
|
||||
#
|
||||
default_pod_security_policy_template_id: restricted-noroot
|
||||
docker_root_dir: /var/lib/docker
|
||||
enable_cluster_alerting: false
|
||||
enable_cluster_monitoring: false
|
||||
enable_network_policy: true
|
||||
local_cluster_auth_endpoint:
|
||||
enabled: true
|
||||
name: ''
|
||||
#
|
||||
# Rancher Config
|
||||
#
|
||||
rancher_kubernetes_engine_config:
|
||||
addon_job_timeout: 45
|
||||
authentication:
|
||||
strategy: x509
|
||||
dns:
|
||||
nodelocal:
|
||||
ip_address: ''
|
||||
node_selector: null
|
||||
update_strategy: {}
|
||||
enable_cri_dockerd: false
|
||||
ignore_docker_version: true
|
||||
#
|
||||
# # Currently only nginx ingress provider is supported.
|
||||
# # To disable ingress controller, set `provider: none`
|
||||
# # To enable ingress on specific nodes, use the node_selector, eg:
|
||||
# provider: nginx
|
||||
# node_selector:
|
||||
# app: ingress
|
||||
#
|
||||
ingress:
|
||||
default_backend: false
|
||||
default_ingress_class: true
|
||||
http_port: 0
|
||||
https_port: 0
|
||||
provider: nginx
|
||||
kubernetes_version: v1.21.8-rancher1-1
|
||||
monitoring:
|
||||
provider: metrics-server
|
||||
replicas: 1
|
||||
#
|
||||
# If you are using calico on AWS
|
||||
#
|
||||
# network:
|
||||
# plugin: calico
|
||||
# calico_network_provider:
|
||||
# cloud_provider: aws
|
||||
#
|
||||
# # To specify flannel interface
|
||||
#
|
||||
# network:
|
||||
# plugin: flannel
|
||||
# flannel_network_provider:
|
||||
# iface: eth1
|
||||
#
|
||||
# # To specify flannel interface for canal plugin
|
||||
#
|
||||
# network:
|
||||
# plugin: canal
|
||||
# canal_network_provider:
|
||||
# iface: eth1
|
||||
#
|
||||
network:
|
||||
mtu: 0
|
||||
options:
|
||||
flannel_backend_type: vxlan
|
||||
plugin: canal
|
||||
rotate_encryption_key: false
|
||||
#
|
||||
# services:
|
||||
# kube-api:
|
||||
# service_cluster_ip_range: 10.43.0.0/16
|
||||
# kube-controller:
|
||||
# cluster_cidr: 10.42.0.0/16
|
||||
# service_cluster_ip_range: 10.43.0.0/16
|
||||
# kubelet:
|
||||
# cluster_domain: cluster.local
|
||||
# cluster_dns_server: 10.43.0.10
|
||||
#
|
||||
services:
|
||||
scheduler:
|
||||
extra_args:
|
||||
tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||
bind-address: 127.0.0.1
|
||||
etcd:
|
||||
backup_config:
|
||||
enabled: true
|
||||
interval_hours: 12
|
||||
retention: 6
|
||||
safe_timestamp: false
|
||||
timeout: 300
|
||||
creation: 12h
|
||||
extra_args:
|
||||
election-timeout: 5000
|
||||
heartbeat-interval: 500
|
||||
retention: 72h
|
||||
snapshot: false
|
||||
uid: 52034
|
||||
gid: 52034
|
||||
kube_api:
|
||||
always_pull_images: false
|
||||
audit_log:
|
||||
enabled: true
|
||||
event_rate_limit:
|
||||
enabled: true
|
||||
pod_security_policy: true
|
||||
secrets_encryption_config:
|
||||
enabled: true
|
||||
service_node_port_range: 30000-32767
|
||||
kube-controller:
|
||||
extra_args:
|
||||
feature-gates: RotateKubeletServerCertificate=true
|
||||
tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||
bind-address: 127.0.0.1
|
||||
kubelet:
|
||||
extra_args:
|
||||
feature-gates: RotateKubeletServerCertificate=true
|
||||
protect-kernel-defaults: true
|
||||
tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||
fail_swap_on: false
|
||||
generate_serving_certificate: true
|
||||
ssh_agent_auth: false
|
||||
upgrade_strategy:
|
||||
max_unavailable_controlplane: '1'
|
||||
max_unavailable_worker: 10%
|
||||
windows_prefered_cluster: false
|
||||
```
|
||||
|
||||
### Reference Hardened **cloud-config** Configuration
|
||||
|
||||
A **cloud-config** configuration file is generally used in cloud infrastructure environments to allow for configuration management of compute instances. The reference config configures SUSE Linux Enterprise Server (SLES), openSUSE Leap, Red Hat Enterprise Linux (RHEL) and Ubuntu operating system level settings needed before installing Kubernetes.
|
||||
|
||||
#### Reference Hardened **cloud-config** for SUSE Linux Enterprise Server 15 (SLES 15) and openSUSE Leap 15
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
system_info:
|
||||
default_user:
|
||||
groups:
|
||||
- docker
|
||||
write_files:
|
||||
- path: "/etc/sysctl.d/90-kubelet.conf"
|
||||
owner: root:root
|
||||
permissions: '0644'
|
||||
content: |
|
||||
vm.overcommit_memory=1
|
||||
vm.panic_on_oom=0
|
||||
kernel.panic=10
|
||||
kernel.panic_on_oops=1
|
||||
kernel.keys.root_maxbytes=25000000
|
||||
package_update: true
|
||||
ssh_pwauth: false
|
||||
runcmd:
|
||||
# Docker should already be installed in SLES 15 SP3
|
||||
- zypper install docker containerd
|
||||
- systemctl daemon-reload
|
||||
- systemctl enable docker.service
|
||||
- systemctl start --no-block docker.service
|
||||
- sysctl -p /etc/sysctl.d/90-kubelet.conf
|
||||
- groupadd --gid 52034 etcd
|
||||
- useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd --shell /usr/sbin/nologin
|
||||
```
|
||||
|
||||
#### Reference Hardened **cloud-config** for Red Hat Enterprise Linux 8 (RHEL 8) and Ubuntu 20.04 LTS
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
system_info:
|
||||
default_user:
|
||||
groups:
|
||||
- docker
|
||||
write_files:
|
||||
- path: "/etc/sysctl.d/90-kubelet.conf"
|
||||
owner: root:root
|
||||
permissions: '0644'
|
||||
content: |
|
||||
vm.overcommit_memory=1
|
||||
vm.panic_on_oom=0
|
||||
kernel.panic=10
|
||||
kernel.panic_on_oops=1
|
||||
kernel.keys.root_maxbytes=25000000
|
||||
package_update: true
|
||||
ssh_pwauth: false
|
||||
runcmd:
|
||||
# Install Docker from Rancher's Docker installation scripts - github.com/rancher/install-docker
|
||||
- curl https://releases.rancher.com/install-docker/20.10.sh | sh
|
||||
- sysctl -p /etc/sysctl.d/90-kubelet.conf
|
||||
- groupadd --gid 52034 etcd
|
||||
- useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd --shell /usr/sbin/nologin
|
||||
```
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: Self-Assessment and Hardening Guides for Rancher v2.6
|
||||
shortTitle: Rancher v2.6 Hardening Guides
|
||||
shortTitle: Rancher v2.6 Guides
|
||||
weight: 1
|
||||
aliases:
|
||||
- /rancher/v2.6/en/security/rancher-2.5/
|
||||
@@ -10,56 +10,4 @@ aliases:
|
||||
- /rancher/v2.6/en/security/rancher-2.5/1.6-benchmark-2.5/
|
||||
---
|
||||
|
||||
Rancher provides specific security hardening guides for each supported Rancher's Kubernetes distributions.
|
||||
|
||||
- [Rancher Kubernetes Distributions](#rancher-kubernetes-distributions)
|
||||
- [Hardening Guides and Benchmark Versions](#hardening-guides-and-benchmark-versions)
|
||||
- [RKE Guides](#rke-guides)
|
||||
- [RKE2 Guides](#rke2-guides)
|
||||
- [K3s Guides](#k3s)
|
||||
- [Rancher with SELinux](#rancher-with-selinux)
|
||||
|
||||
# Rancher Kubernetes Distributions
|
||||
|
||||
Rancher uses the following Kubernetes distributions:
|
||||
|
||||
- [**RKE**]({{<baseurl>}}/rke/latest/en/), Rancher Kubernetes Engine, is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers.
|
||||
- [**RKE2**](https://docs.rke2.io/) is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector.
|
||||
- [**K3s**]({{<baseurl>}}/k3s/latest/en/) is a fully conformant, lightweight Kubernetes distribution. It is easy to install, with half the memory of upstream Kubernetes, all in a binary of less than 100 MB.
|
||||
|
||||
To harden a Kubernetes cluster outside of Rancher's distributions, refer to your Kubernetes provider docs.
|
||||
|
||||
# Hardening Guides and Benchmark Versions
|
||||
|
||||
These guides have been tested along with the Rancher v2.6 release. Each self-assessment guide is accompanied with a hardening guide and tested on a specific Kubernetes version and CIS benchmark version. If a CIS benchmark has not been validated for your Kubernetes version, you can choose to use the existing guides until a newer version is added.
|
||||
|
||||
### RKE Guides
|
||||
|
||||
| Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guides |
|
||||
| --- | --- | --- | --- |
|
||||
| Kubernetes v1.18, v1.19, v1.20 and v1.21 | CIS v1.6 | [Link](./1.6-benchmark-2.6) | [Link](./1.6-hardening-2.6) |
|
||||
|
||||
> **Notes**
|
||||
>
|
||||
> - Kubernetes v1.22 is currently in experimental mode in Rancher v2.6.3.
|
||||
> - CIS v1.20 benchmark version for Kubernetes v1.19 and v1.20 is not yet released as a profile in Rancher's CIS Benchmark chart.
|
||||
|
||||
### RKE2 Guides
|
||||
|
||||
| Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guides |
|
||||
| --- | --- | --- | --- |
|
||||
| Kubernetes v1.18 | CIS v1.5 | [Link](https://docs.rke2.io/security/cis_self_assessment15/) | [Link](https://docs.rke2.io/security/hardening_guide/) |
|
||||
| Kubernetes v1.20 | CIS v1.6 | [Link](https://docs.rke2.io/security/cis_self_assessment16/) | [Link](https://docs.rke2.io/security/hardening_guide/) |
|
||||
|
||||
### K3s Guides
|
||||
|
||||
| Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guide |
|
||||
| --- | --- | --- | --- |
|
||||
| Kubernetes v1.17, v1.18, & v1.19 | CIS v1.5 | [Link]({{<baseurl>}}/k3s/latest/en/security/self_assessment/) | [Link]({{<baseurl>}}/k3s/latest/en/security/hardening_guide/) |
|
||||
|
||||
|
||||
# Rancher with SELinux
|
||||
|
||||
[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. After being historically used by government agencies, SELinux is now industry standard and is enabled by default on RHEL and CentOS.
|
||||
|
||||
To use Rancher with SELinux, we recommend installing the `rancher-selinux` RPM according to the instructions on [this page.]({{<baseurl>}}/rancher/v2.6/en/security/selinux/#installing-the-rancher-selinux-rpm)
|
||||
Rancher v2.6 hardening guides are currently being updated. For the time being, please consult [Rancher v2.5 self-assessment and hardening guides]({{<baseurl>}}/rancher/v2.5/en/security/rancher-2.5) for more information.
|
||||
|
||||
@@ -25,8 +25,6 @@ To navigate to the Harvester cluster, click **☰ > Virtualization Management**.
|
||||
|
||||
The [Harvester node driver](https://docs.harvesterhci.io/v0.3/rancher/node-driver/) is marked as `tech preview` on RKE and RKE2 options in Rancher. This will be the case both on the Create page and once the driver is already enabled. The node driver is available whether or not the Harvester feature flag is enabled. Note that the node driver is off by default. Users may create RKE or RKE2 clusters on Harvester only from the Cluster Management page.
|
||||
|
||||
Harvester allows `.ISO` images to be uploaded and displayed through the Harvester UI, but this is not supported in the Rancher UI. This is because `.ISO` images usually require additional setup that interferes with a clean deployment (without requiring user intervention), and they are not typically used in cloud environments.
|
||||
|
||||
Click [here]({{<baseurl>}}/rancher/v2.6/en/admin-settings/drivers/#node-drivers) for more information on node drivers in Rancher.
|
||||
|
||||
### Limitations
|
||||
|
||||
@@ -238,7 +238,7 @@ h2 {
|
||||
font-size:1.5em;
|
||||
}
|
||||
|
||||
h3 {font-size:1.2em;}
|
||||
h3 {font-size:1.4em;}
|
||||
h4 {font-size:1.3em;
|
||||
line-height:30px;
|
||||
}
|
||||
@@ -283,7 +283,7 @@ nav ul li a {
|
||||
nav ul li a::after {content: target-counter(attr(href url), page, decimal); float:right;margin-right:10px;}
|
||||
nav ul li ul {list-style-type: none; border-left-style: dashed; border-left-width: 1px; border-color: #000; margin-top:1.5em;}
|
||||
nav ul li ul li {margin-left:-.5em;color:#ff0000;}
|
||||
nav ul li ul li a {border:none;font-family:PoppinsExtraLight;font-size:.75em;margin-bottom:1.8em;}
|
||||
nav ul li ul li a {border:none;font-family:PoppinsExtraLight;margin-top:-1.5em;}
|
||||
nav ul li ul li a::after {font-size:.75em;}
|
||||
nav code {background:none;}
|
||||
nav a{text-decoration:none;outline:none;color:#000;}
|
||||
|
||||
@@ -19,7 +19,7 @@ get_id_text() {
|
||||
|
||||
get_section_ids() {
|
||||
id=${1}
|
||||
jq -r --arg id "${id}" '.[] | select(.id==$id) | .checks[].id' ${results_file} | sort -V
|
||||
jq -r --arg id "${id}" '.[] | select(.id==$id) | .checks[].id' ${results_file}
|
||||
}
|
||||
|
||||
get_section_desc() {
|
||||
@@ -53,11 +53,12 @@ for id in $(get_ids); do
|
||||
test_desc=$(echo ${result} | jq -r '.description')
|
||||
audit=$(echo ${result} | jq -r '.audit')
|
||||
audit_config=$(echo ${result} | jq -r '.audit_config')
|
||||
actual_value=$(echo ${result} | jq -r '.actual_value_per_node[]')
|
||||
actual_value=$(echo ${result} | jq -r '.actual_value_per_node."cis-aio-0"')
|
||||
type=$(echo ${result} | jq -r '.test_type')
|
||||
status=$(echo ${result} | jq -r '.state')
|
||||
remediation=$(echo ${result} | jq -r '.remediation')
|
||||
expected_result=$(echo ${result} | jq -r '.expected_result')
|
||||
# echo "#### ${test} ${test_desc}"
|
||||
echo
|
||||
if [ "${type}" = "skip" ]; then
|
||||
echo "**Result:** Not Applicable"
|
||||
@@ -75,7 +76,7 @@ for id in $(get_ids); do
|
||||
if [[ ${audit} =~ ".sh" ]]; then
|
||||
audit_script=$(basename $(echo ${audit} | cut -d ' ' -f1))
|
||||
test_helper="${test_helpers}/${audit_script}"
|
||||
echo "**Audit Script:** \`${audit_script}\`"
|
||||
echo "**Audit Script:** ${audit_script}"
|
||||
echo
|
||||
echo '```bash'
|
||||
cat ${test_helper}
|
||||
@@ -105,14 +106,6 @@ for id in $(get_ids); do
|
||||
echo '```'
|
||||
echo
|
||||
fi
|
||||
if [ ! -z "${expected_result}" ]; then
|
||||
echo "**Expected Result**:"
|
||||
echo
|
||||
echo '```console'
|
||||
echo ${expected_result}
|
||||
echo '```'
|
||||
echo
|
||||
fi
|
||||
if [ ! -z "${actual_value}" ] && [ "${status}" != "PASS" ] && [ "${type}" != "skip" ] && [ "${type}" != "manual" ]; then
|
||||
echo "**Returned Value**:"
|
||||
echo
|
||||
@@ -121,6 +114,14 @@ for id in $(get_ids); do
|
||||
echo '```'
|
||||
echo
|
||||
fi
|
||||
if [ ! -z "${expected_result}" ]; then
|
||||
echo "**Expected result**:"
|
||||
echo
|
||||
echo '```console'
|
||||
echo ${expected_result}
|
||||
echo '```'
|
||||
echo
|
||||
fi
|
||||
done
|
||||
done
|
||||
done
|
||||
|
||||
Reference in New Issue
Block a user