From 03ea6163fb53e46c2ecd6e5cb9f2b2b4a1c74721 Mon Sep 17 00:00:00 2001 From: Billy Tat Date: Wed, 6 Dec 2023 11:48:27 -0800 Subject: [PATCH] Port version-2.8 updates to latest (/docs) (#1013) * Port version-2.8 updates to latest (/docs) Includes changes from 1b6d9506 (2023-10-06) to 1f39a6ff (2023-11-30) * Fix redirects --- docs/api/api-reference.mdx | 7 + docs/api/quickstart.md | 140 ++++++++++++++++++ docs/api/workflows/projects.md | 109 ++++++++++++++ .../installation-references/feature-flags.md | 2 +- .../port-requirements.md | 2 +- .../deploy-apps-across-clusters/fleet.md | 6 +- .../dynamically-provision-new-storage.md | 2 +- .../set-up-existing-storage.md | 2 +- .../aws-cloud-marketplace/install-adapter.md | 14 +- .../elemental/elemental.md | 27 ++++ docs/integrations-in-rancher/epinio/epinio.md | 22 +++ .../architecture.md | 4 - docs/integrations-in-rancher/fleet/fleet.md | 23 +++ .../fleet/overview.md} | 14 +- .../use-fleet-behind-a-proxy.md | 4 - .../windows-support.md | 4 - .../harvester/harvester.md | 11 ++ .../{harvester.md => harvester/overview.md} | 20 +-- .../integrations-in-rancher.mdx | 66 +++++++++ .../kubernetes-distributions.md | 31 ++++ .../kubewarden/kubewarden.md | 35 +++++ .../longhorn/longhorn.md | 15 ++ .../{longhorn.md => longhorn/overview.md} | 4 +- .../built-in-dashboards.md | 2 +- .../neuvector/neuvector.md | 27 ++++ .../{neuvector.md => neuvector/overview.md} | 6 +- docs/integrations-in-rancher/opni/opni.md | 23 +++ .../rancher-desktop.md | 34 +++++ .../about-provisioning-drivers.md | 2 +- .../create-kubernetes-persistent-storage.md | 2 +- docs/pages-for-subheaders/rancher-security.md | 2 +- .../rancher-webhook-hardening.md | 133 +++++++++++++++++ docusaurus.config.js | 6 +- sidebars.js | 97 +++++++++--- 34 files changed, 810 insertions(+), 88 deletions(-) create mode 100644 docs/api/api-reference.mdx create mode 100644 docs/api/quickstart.md create mode 100644 docs/api/workflows/projects.md create mode 100644 
docs/integrations-in-rancher/elemental/elemental.md create mode 100644 docs/integrations-in-rancher/epinio/epinio.md rename docs/integrations-in-rancher/{fleet-gitops-at-scale => fleet}/architecture.md (79%) create mode 100644 docs/integrations-in-rancher/fleet/fleet.md rename docs/{pages-for-subheaders/fleet-gitops-at-scale.md => integrations-in-rancher/fleet/overview.md} (81%) rename docs/integrations-in-rancher/{fleet-gitops-at-scale => fleet}/use-fleet-behind-a-proxy.md (94%) rename docs/integrations-in-rancher/{fleet-gitops-at-scale => fleet}/windows-support.md (84%) create mode 100644 docs/integrations-in-rancher/harvester/harvester.md rename docs/integrations-in-rancher/{harvester.md => harvester/overview.md} (82%) create mode 100644 docs/integrations-in-rancher/integrations-in-rancher.mdx create mode 100644 docs/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md create mode 100644 docs/integrations-in-rancher/kubewarden/kubewarden.md create mode 100644 docs/integrations-in-rancher/longhorn/longhorn.md rename docs/integrations-in-rancher/{longhorn.md => longhorn/overview.md} (96%) create mode 100644 docs/integrations-in-rancher/neuvector/neuvector.md rename docs/integrations-in-rancher/{neuvector.md => neuvector/overview.md} (98%) create mode 100644 docs/integrations-in-rancher/opni/opni.md create mode 100644 docs/integrations-in-rancher/rancher-desktop.md create mode 100644 docs/reference-guides/rancher-security/rancher-webhook-hardening.md diff --git a/docs/api/api-reference.mdx b/docs/api/api-reference.mdx new file mode 100644 index 00000000000..d8674b6e14f --- /dev/null +++ b/docs/api/api-reference.mdx @@ -0,0 +1,7 @@ +--- +title: API Reference +--- + +import ApiDocMdx from '@theme/ApiDocMdx'; + + \ No newline at end of file diff --git a/docs/api/quickstart.md b/docs/api/quickstart.md new file mode 100644 index 00000000000..4529964d59a --- /dev/null +++ b/docs/api/quickstart.md @@ -0,0 +1,140 @@ +--- +title: API Quick Start 
Guide +--- + +You can access Rancher's resources through the Kubernetes API. This guide will help you get started on using this API as a Rancher user. + +1. In the upper left corner, click **☰ > Global Settings**. +2. Find and copy the address in the `server-url` field. +3. [Create](../reference-guides/user-settings/api-keys#creating-an-api-key) a Rancher API key with no scope. + + :::danger + + A Rancher API key with no scope grants unrestricted access to all resources that the user can access. To prevent unauthorized use, this key should be stored securely and rotated frequently. + + ::: + +4. Create a `kubeconfig.yaml` file. Replace `$SERVER_URL` with the server url and `$API_KEY` with your Rancher API key: + + ```yaml + apiVersion: v1 + kind: Config + clusters: + - name: "rancher" + cluster: + server: "$SERVER_URL" + + users: + - name: "rancher" + user: + token: "$API_KEY" + + contexts: + - name: "rancher" + context: + user: "rancher" + cluster: "rancher" + + current-context: "rancher" + ``` + +You can use this file with any compatible tool, such as kubectl or [client-go](https://github.com/kubernetes/client-go). For a quick demo, see the [kubectl example](#api-kubectl-example). + +For more information on handling more complex certificate setups, see [Specifying CA Certs](#specifying-ca-certs). + +For more information on available kubeconfig options, see the [upstream documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). + +## API kubectl Example + +1. Set your KUBECONFIG environment variable to the kubeconfig file you just created: + + ```bash + export KUBECONFIG=$(pwd)/kubeconfig.yaml + ``` + +2. Use `kubectl explain` to view the available fields for projects, or complex sub-fields of resources: + + ```bash + kubectl explain projects + kubectl explain projects.spec + ``` + +Not all resources may have detailed output. + +3. 
Add the following content to a file named `project.yaml`: + + ```yaml + apiVersion: management.cattle.io/v3 + kind: Project + metadata: + # name should be unique across all projects in every cluster + name: p-abc123 + # generateName can be used instead of `name` to randomly generate a name. + # generateName: p- + # namespace should match spec.ClusterName. + namespace: local + spec: + # clusterName should match `metadata.Name` of the target cluster. + clusterName: local + description: Example Project + # displayName is the human-readable name and is visible from the UI. + displayName: Example + ``` + +4. Create the project: + + ```bash + kubectl create -f project.yaml + ``` + +5. Delete the project: + + How you delete the project depends on how you created the project name. + + **A. If you used `name` when creating the project**: + + ```bash + kubectl delete -f project.yaml + ``` + + **B. If you used `generateName`**: + + Replace `$PROJECT_NAME` with the randomly generated name of the project displayed by Kubectl after you created the project. + + ```bash + kubectl delete project $PROJECT_NAME -n local + ``` + +## Specifying CA Certs + +To ensure that your tools can recognize Rancher's CA certificates, most setups require additional modifications to the above template. + +1. In the upper left corner, click **☰ > Global Settings**. +2. Find and copy the value in the `ca-certs` field. +3. Save the value in a file named `rancher.crt`. + + :::note + If your Rancher instance is proxied by another service, you must extract the certificate that the service is using, and add it to the kubeconfig file, as demonstrated in step 5. + ::: + +4. 
The following commands will convert `rancher.crt` to base64 output, trim all new-lines, and update the cluster in the kubeconfig with the certificate, and then remove the `rancher.crt` file: + + ```bash + export KUBECONFIG=$PATH_TO_RANCHER_KUBECONFIG + kubectl config set clusters.rancher.certificate-authority-data $(cat rancher.crt | base64 -i - | tr -d '\n') + rm rancher.crt + ``` +5. (Optional) If you use self-signed certificates that aren't trusted by your system, you can set the insecure option in your kubeconfig with kubectl: + + :::danger + + This option shouldn't be used in production as it is a security risk. + + ::: + + ```bash + export KUBECONFIG=$PATH_TO_RANCHER_KUBECONFIG + kubectl config set clusters.rancher.insecure-skip-tls-verify true + ``` + + If your Rancher instance is proxied by another service, you must extract the certificate that the service is using, and add it to the kubeconfig file, as demonstrated above. diff --git a/docs/api/workflows/projects.md b/docs/api/workflows/projects.md new file mode 100644 index 00000000000..ddc2f8c5aae --- /dev/null +++ b/docs/api/workflows/projects.md @@ -0,0 +1,109 @@ +--- +title: Projects +--- + +## Creating a Project + +Project resources may only be created on the management cluster. See below for [creating namespaces under projects in a managed cluster](#creating-a-namespace-in-a-project). + +### Creating a Basic Project + +```bash +kubectl create -f - <:`. 
+ +## Deleting a Project + +Look up the project to delete in the cluster namespace: + +```bash +kubectl --namespace c-m-abcde get projects +``` + +Delete the project under the cluster namespace: + +```bash +kubectl --namespace c-m-abcde delete project p-vwxyz +``` diff --git a/docs/getting-started/installation-and-upgrade/installation-references/feature-flags.md b/docs/getting-started/installation-and-upgrade/installation-references/feature-flags.md index 377d8111523..176fc240314 100644 --- a/docs/getting-started/installation-and-upgrade/installation-references/feature-flags.md +++ b/docs/getting-started/installation-and-upgrade/installation-references/feature-flags.md @@ -20,7 +20,7 @@ The following is a list of feature flags available in Rancher. If you've upgrade - `continuous-delivery`: Allows Fleet GitOps to be disabled separately from Fleet. See [Continuous Delivery.](../../../how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md) for more information. - `fleet`: The Rancher provisioning framework in v2.6 and later requires Fleet. The flag will be automatically enabled when you upgrade, even if you disabled this flag in an earlier version of Rancher. See [Fleet - GitOps at Scale](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) for more information. -- `harvester`: Manages access to the Virtualization Management page, where users can navigate directly to Harvester clusters and access the Harvester UI. See [Harvester Integration](../../../integrations-in-rancher/harvester.md) for more information. +- `harvester`: Manages access to the Virtualization Management page, where users can navigate directly to Harvester clusters and access the Harvester UI. See [Harvester Integration Overview](../../../integrations-in-rancher/harvester/overview.md) for more information. 
- `istio-virtual-service-ui`: Enables a [visual interface](../../../how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md) to create, read, update, and delete Istio virtual services and destination rules, which are Istio traffic management features. - `legacy`: Enables a set of features from 2.5.x and earlier, that are slowly being phased out in favor of newer implementations. These are a mix of deprecated features as well as features that will eventually be available to newer versions. This flag is disabled by default on new Rancher installations. If you're upgrading from a previous version of Rancher, this flag is enabled. - `multi-cluster-management`: Allows multi-cluster provisioning and management of Kubernetes clusters. This flag can only be set at install time. It can't be enabled or disabled later. diff --git a/docs/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md b/docs/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md index 73c739499a5..eecd8dd258b 100644 --- a/docs/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md +++ b/docs/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md @@ -196,7 +196,7 @@ If security isn't a large concern and you're okay with opening a few additional ### Ports for Harvester Clusters -Refer [here](../../../integrations-in-rancher/harvester.md#port-requirements) for more information on Harvester port requirements. +Refer to the [Harvester Integration Overview](../../../integrations-in-rancher/harvester/overview.md#port-requirements) for more information on Harvester port requirements. 
### Ports for Rancher Launched Kubernetes Clusters using Node Pools diff --git a/docs/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md b/docs/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md index 8ff8e4cd5be..e9d4dec7faa 100644 --- a/docs/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md +++ b/docs/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md @@ -13,7 +13,7 @@ Fleet is a separate project from Rancher, and can be installed on any Kubernetes ## Architecture -For information about how Fleet works, see [this page.](../../../integrations-in-rancher/fleet-gitops-at-scale/architecture.md) +For information about how Fleet works, see [this page.](../../../integrations-in-rancher/fleet/architecture.md) ## Accessing Fleet in the Rancher UI @@ -38,7 +38,7 @@ Follow the steps below to access Continuous Delivery in the Rancher UI: ## Windows Support -For details on support for clusters with Windows nodes, see [this page.](../../../integrations-in-rancher/fleet-gitops-at-scale/windows-support.md) +For details on support for clusters with Windows nodes, see [this page.](../../../integrations-in-rancher/fleet/windows-support.md) ## GitHub Repository @@ -48,7 +48,7 @@ The Fleet Helm charts are available [here.](https://github.com/rancher/fleet/rel ## Using Fleet Behind a Proxy -For details on using Fleet behind a proxy, see [this page.](../../../integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md) +For details on using Fleet behind a proxy, see [this page.](../../../integrations-in-rancher/fleet/use-fleet-behind-a-proxy.md) ## Helm Chart Dependencies diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md b/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md index 
5ad0b03cd45..77343a9f5c9 100644 --- a/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md +++ b/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md @@ -12,7 +12,7 @@ This section assumes that you understand the Kubernetes concepts of storage clas New storage is often provisioned by a cloud provider such as Amazon EBS. However, new storage doesn't have to be in the cloud. -If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [this page.](../../../../../integrations-in-rancher/longhorn.md) +If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [Cloud Native Storage with Longhorn](../../../../../integrations-in-rancher/longhorn/longhorn.md). To provision new storage for your workloads, follow these steps: diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md b/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md index 60661aea03b..4be791f5cc3 100644 --- a/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md +++ b/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md @@ -31,7 +31,7 @@ Creating a persistent volume in Rancher will not create a storage volume. It onl The steps to set up a persistent storage device will differ based on your infrastructure. 
We provide examples of how to set up storage using [vSphere,](../../provisioning-storage-examples/vsphere-storage.md) [NFS,](../../provisioning-storage-examples/nfs-storage.md) or Amazon's [EBS.](../../provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) -If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [this page.](../../../../../integrations-in-rancher/longhorn.md) +If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [Cloud Native Storage with Longhorn](../../../../../integrations-in-rancher/longhorn/longhorn.md). ### 2. Add a PersistentVolume that refers to the persistent storage diff --git a/docs/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md b/docs/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md index 420e05b2d81..6a325ef79a8 100644 --- a/docs/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md +++ b/docs/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md @@ -17,15 +17,9 @@ In order to deploy and run the adapter successfully, you need to ensure its vers ::: -| Rancher Version | Adapter Version | -|-----------------|:---------------:| -| v2.7.0 | v2.0.0 | -| v2.7.1 | v2.0.0 | -| v2.7.2 | v2.0.1 | -| v2.7.3 | v2.0.1 | -| v2.7.4 | v2.0.1 | -| v2.7.5 | v2.0.2 | - +| Rancher Version | Adapter Version | +|-----------------|:----------------:| +| v2.8.0 | v103.0.0+up3.0.0 | ### 1. 
Gain Access to the Local Cluster @@ -156,4 +150,4 @@ Finally, restart the rancher-csp-adapter deployment to ensure that the updated v kubectl rollout restart deploy rancher-csp-adapter -n cattle-csp-adapter-system ``` -> **Note:** There are methods such as cert-manager's [trust operator](https://cert-manager.io/docs/projects/trust/) which can help reduce the number of manual rotation tasks over time. While these options are not officially supported, they may be useful to users wishing to automate some of these tasks. \ No newline at end of file +> **Note:** Methods such as cert-manager's [trust operator](https://cert-manager.io/docs/projects/trust/) allow you to automate some of these tasks. Although these methods aren't officially supported, they can reduce how often you need to manually rotate certificates. diff --git a/docs/integrations-in-rancher/elemental/elemental.md b/docs/integrations-in-rancher/elemental/elemental.md new file mode 100644 index 00000000000..5e93a4b3538 --- /dev/null +++ b/docs/integrations-in-rancher/elemental/elemental.md @@ -0,0 +1,27 @@ +--- +title: Operating System Management with Elemental +--- + +Elemental enables cloud-native host management. Elemental allows you to onboard any machine in any location, whether it's in a datacenter or on the edge, and integrate it seamlessly into Kubernetes while managing your workflows (e.g., OS updates). + +## Elemental with Rancher + +Elemental in Rancher: + +- Is Kubernetes native, which allows you to manage the OS via Elemental in Kubernetes clusters. +- Is nondisruptive from a Kubernetes operational perspective. +- Is declarative and GitOps friendly. +- Allows OCI Image-based flows, which are trusted, deterministic, and predictable. +- Works at scale. It enables fleet-sized OS management. + +### When should I use Elemental? + +- Elemental enables cloud-native OS management from Rancher manager. It works with any OS (e.g., SLE Micro vanilla). 
+- Elemental allows cloud-native management for machines in datacenters and on the edge. +- Elemental is flexible and allows platform teams to perform all kinds of workflows across their fleet of machines. + +## Elemental with Rancher Prime + +- Deeply integrated already as GUI Extension in Rancher. +- Extends the Rancher story to the OS. Working perfectly with SLE Micro for Rancher today. + \ No newline at end of file diff --git a/docs/integrations-in-rancher/epinio/epinio.md b/docs/integrations-in-rancher/epinio/epinio.md new file mode 100644 index 00000000000..fe8e4197f90 --- /dev/null +++ b/docs/integrations-in-rancher/epinio/epinio.md @@ -0,0 +1,22 @@ +--- +title: Application Development Engine with Epinio +--- + + + + + +Epinio is a Kubernetes-based Application Development Platform. It helps operators and developers collaborate without conflict, and accelerates the development process. With Epinio, teams can move from application sources to a live URL in a single step. + +## Epinio with Rancher + +Epinio's integration with Rancher gives developers a jump start, without having to deal with the installation process or configuration. You can install Epinio directly from the Rancher UI's Apps page. + +## Epinio with Rancher Prime + +Rancher Prime customers can expect better integration of Epinio with other areas in the Rancher ecosystem such as: + +- Better integration with Rancher authentication. +- Integration with Neuvector and Kubewarden. +- Custom Helm chart templates with preset annotations to seamlessly integrate with monitoring and other key tools. +- Improved service marketplace. 
diff --git a/docs/integrations-in-rancher/fleet-gitops-at-scale/architecture.md b/docs/integrations-in-rancher/fleet/architecture.md similarity index 79% rename from docs/integrations-in-rancher/fleet-gitops-at-scale/architecture.md rename to docs/integrations-in-rancher/fleet/architecture.md index 9d64e38de41..f012a3a9921 100644 --- a/docs/integrations-in-rancher/fleet-gitops-at-scale/architecture.md +++ b/docs/integrations-in-rancher/fleet/architecture.md @@ -2,10 +2,6 @@ title: Architecture --- - - - - Fleet can manage deployments from git of raw Kubernetes YAML, Helm charts, or Kustomize or any combination of the three. Regardless of the source, all resources are dynamically turned into Helm charts, and Helm is used as the engine to deploy everything in the cluster. This gives you a high degree of control, consistency, and auditability. Fleet focuses not only on the ability to scale, but to give one a high degree of control and visibility to exactly what is installed on the cluster. ![Architecture](/img/fleet-architecture.svg) diff --git a/docs/integrations-in-rancher/fleet/fleet.md b/docs/integrations-in-rancher/fleet/fleet.md new file mode 100644 index 00000000000..0db52a7e9e1 --- /dev/null +++ b/docs/integrations-in-rancher/fleet/fleet.md @@ -0,0 +1,23 @@ +--- +title: Continuous Delivery with Fleet +--- + +Fleet orchestrates and manages the continuous delivery of applications through the supply chain for fleets of clusters. Fleet organizes the supply chain to help teams deliver with confidence and trust in a timely manner using GitOps as a safe operating model. + +## Fleet with Rancher + +Many users often manage over 10 clusters at a time. Given the proliferation of clusters, continuous delivery is an important part of Rancher. Fleet ensures a reliable continuous delivery experience using GitOps, which is a safe and increasingly common operating model. + +### When should I use Fleet? 
+ +- I need to deploy my monitoring stack (e.g., Grafana, Prometheus) across geographical regions, each with different retention policies. +- I am a platform operator and want to provision clusters with all components using a scalable and safe operating model (GitOps). +- I am an application developer and want to get my latest changes automatically into my development environment. + +## Fleet with Rancher Prime + +Fleet is already deeply integrated as the Continuous Delivery tool and GitOps Engine in Rancher. + + diff --git a/docs/pages-for-subheaders/fleet-gitops-at-scale.md b/docs/integrations-in-rancher/fleet/overview.md similarity index 81% rename from docs/pages-for-subheaders/fleet-gitops-at-scale.md rename to docs/integrations-in-rancher/fleet/overview.md index 54e38cc48eb..b7e1806fb58 100644 --- a/docs/pages-for-subheaders/fleet-gitops-at-scale.md +++ b/docs/integrations-in-rancher/fleet/overview.md @@ -1,11 +1,7 @@ --- -title: Continuous Delivery with Fleet +title: Overview --- - - - - Continuous Delivery with Fleet is GitOps at scale. Fleet is designed to manage up to a million clusters. It’s also lightweight enough that it works great for a [single cluster](https://fleet.rancher.io/installation#default-install) too, but it really shines when you get to a [large scale](https://fleet.rancher.io/installation#configuration-for-multi-cluster). By large scale we mean either a lot of clusters, a lot of deployments, or a lot of teams in a single organization. Fleet is a separate project from Rancher, and can be installed on any Kubernetes cluster with Helm. @@ -13,7 +9,7 @@ Fleet is a separate project from Rancher, and can be installed on any Kubernetes ## Architecture -For information about how Fleet works, see [this page](../integrations-in-rancher/fleet-gitops-at-scale/architecture.md). +For information about how Fleet works, see the [Architecture](./architecture.md) page. 
## Accessing Fleet in the Rancher UI @@ -41,7 +37,7 @@ Follow the steps below to access Continuous Delivery in the Rancher UI: ## Windows Support -For details on support for clusters with Windows nodes, see [this page](../integrations-in-rancher/fleet-gitops-at-scale/windows-support.md). +For details on support for clusters with Windows nodes, see the [Windows Support](./windows-support.md) page. ## GitHub Repository @@ -49,7 +45,7 @@ The Fleet Helm charts are available [here](https://github.com/rancher/fleet/rele ## Using Fleet Behind a Proxy -For details on using Fleet behind a proxy, see [this page](../integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md). +For details on using Fleet behind a proxy, see the [Using Fleet Behind a Proxy](./use-fleet-behind-a-proxy.md) page. ## Helm Chart Dependencies @@ -59,7 +55,7 @@ The Helm chart in the git repository must include its dependencies in the charts ## Troubleshooting -- **Known Issue**: clientSecretName and helmSecretName secrets for Fleet gitrepos are not included in the backup nor restore created by the [backup-restore-operator](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md#1-install-the-rancher-backup-operator). We will update the community once a permanent solution is in place. +- **Known Issue**: clientSecretName and helmSecretName secrets for Fleet gitrepos are not included in the backup nor restore created by the [backup-restore-operator](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md#1-install-the-rancher-backup-operator). We will update the community once a permanent solution is in place. - **Temporary Workaround**: By default, user-defined secrets are not backed up in Fleet. It is necessary to recreate secrets if performing a disaster recovery restore or migration of Rancher into a fresh cluster. 
To modify resourceSet to include extra resources you want to backup, refer to docs [here](https://github.com/rancher/backup-restore-operator#user-flow). diff --git a/docs/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md b/docs/integrations-in-rancher/fleet/use-fleet-behind-a-proxy.md similarity index 94% rename from docs/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md rename to docs/integrations-in-rancher/fleet/use-fleet-behind-a-proxy.md index 6160a19672a..e6a3f8cf961 100644 --- a/docs/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md +++ b/docs/integrations-in-rancher/fleet/use-fleet-behind-a-proxy.md @@ -2,10 +2,6 @@ title: Using Fleet Behind a Proxy --- - - - - In this section, you'll learn how to enable Fleet in a setup that has a Rancher server with a public IP a Kubernetes cluster that has no public IP, but is configured to use a proxy. Rancher does not establish connections with registered downstream clusters. The Rancher agent deployed on the downstream cluster must be able to establish the connection with Rancher. diff --git a/docs/integrations-in-rancher/fleet-gitops-at-scale/windows-support.md b/docs/integrations-in-rancher/fleet/windows-support.md similarity index 84% rename from docs/integrations-in-rancher/fleet-gitops-at-scale/windows-support.md rename to docs/integrations-in-rancher/fleet/windows-support.md index f7bf04055f9..aea98b74dbc 100644 --- a/docs/integrations-in-rancher/fleet-gitops-at-scale/windows-support.md +++ b/docs/integrations-in-rancher/fleet/windows-support.md @@ -2,10 +2,6 @@ title: Windows Support --- - - - - Prior to Rancher v2.5.6, the `agent` did not have native Windows manifests on downstream clusters with Windows nodes. This would result in a failing `agent` pod for the cluster. 
If you are upgrading from an older version of Rancher to v2.5.6+, you can deploy a working `agent` with the following workflow *in the downstream cluster*: diff --git a/docs/integrations-in-rancher/harvester/harvester.md b/docs/integrations-in-rancher/harvester/harvester.md new file mode 100644 index 00000000000..c54b817839b --- /dev/null +++ b/docs/integrations-in-rancher/harvester/harvester.md @@ -0,0 +1,11 @@ +--- +title: Virtualization on Kubernetes with Harvester +--- + +## Harvester + +Introduced in Rancher v2.6.1, Harvester is an open-source hyper-converged infrastructure (HCI) software built on Kubernetes. Harvester installs on bare metal servers and provides integrated virtualization and distributed storage capabilities. Although Harvester operates using Kubernetes, it does not require knowledge of Kubernetes concepts, making it more user-friendly. + +## Harvester with Rancher + +With Rancher Prime and Harvester, IT operators now have access to an enterprise-ready, simple-to-use infrastructure platform that cohesively manages their virtual machines and Kubernetes clusters alongside one another. For more information on the support offering, see the [Support Matrix](https://www.suse.com/suse-harvester/support-matrix/all-supported-versions/harvester-v1-2-0/). With the Rancher Virtualization Management feature, users can import and manage multiple Harvester clusters, leveraging Rancher's authentication feature and RBAC control for multi-tenancy support. 
diff --git a/docs/integrations-in-rancher/harvester.md b/docs/integrations-in-rancher/harvester/overview.md similarity index 82% rename from docs/integrations-in-rancher/harvester.md rename to docs/integrations-in-rancher/harvester/overview.md index 300a5826e16..55a9f5b16ac 100644 --- a/docs/integrations-in-rancher/harvester.md +++ b/docs/integrations-in-rancher/harvester/overview.md @@ -1,16 +1,12 @@ --- -title: Harvester Integration +title: Overview --- - - - - Introduced in Rancher v2.6.1, [Harvester](https://docs.harvesterhci.io/) is an open-source hyper-converged infrastructure (HCI) software built on Kubernetes. Harvester installs on bare metal servers and provides integrated virtualization and distributed storage capabilities. Although Harvester operates using Kubernetes, it does not require users to know Kubernetes concepts, making it a more user-friendly application. ### Feature Flag -The Harvester feature flag is used to manage access to the Virtualization Management (VM) page in Rancher where users can navigate directly to Harvester clusters and access the Harvester UI. The Harvester feature flag is enabled by default. Click [here](../pages-for-subheaders/enable-experimental-features.md) for more information on feature flags in Rancher. +The Harvester feature flag is used to manage access to the Virtualization Management (VM) page in Rancher where users can navigate directly to Harvester clusters and access the Harvester UI. The Harvester feature flag is enabled by default. Click [here](../../pages-for-subheaders/enable-experimental-features.md) for more information on feature flags in Rancher. To navigate to the Harvester cluster, click **☰ > Virtualization Management**. From Harvester Clusters page, click one of the clusters listed to go to the single Harvester cluster view. 
@@ -28,7 +24,7 @@ The [Harvester node driver](https://docs.harvesterhci.io/v1.1/rancher/node/node- Harvester allows `.ISO` images to be uploaded and displayed through the Harvester UI, but this is not supported in the Rancher UI. This is because `.ISO` images usually require additional setup that interferes with a clean deployment (without requiring user intervention), and they are not typically used in cloud environments. -Click [here](../pages-for-subheaders/about-provisioning-drivers.md#node-drivers) for more information on node drivers in Rancher. +See [Provisioning Drivers](../../pages-for-subheaders/about-provisioning-drivers.md#node-drivers) for more information on node drivers in Rancher. ### Port Requirements @@ -40,13 +36,3 @@ In addition, other networking considerations are as follows: - Follow the networking setup guidance [here](https://docs.harvesterhci.io/v1.1/networking/index). For other port requirements for other guest clusters, such as K3s and RKE1, please see [these docs](https://docs.harvesterhci.io/v1.1/install/requirements/#guest-clusters). - -### Limitations - ---- -**Applicable to Rancher v2.6.1 and v2.6.2 only:** - -- Harvester v0.3.0 doesn’t support air-gapped environment installation. -- Harvester v0.3.0 doesn’t support upgrade from v0.2.0 nor upgrade to v1.0.0. 
- ---- \ No newline at end of file diff --git a/docs/integrations-in-rancher/integrations-in-rancher.mdx b/docs/integrations-in-rancher/integrations-in-rancher.mdx new file mode 100644 index 00000000000..f8420b32752 --- /dev/null +++ b/docs/integrations-in-rancher/integrations-in-rancher.mdx @@ -0,0 +1,66 @@ +--- +title: Integrations in Rancher +--- +import {Card, CardSection} from '@site/src/components/CardComponents'; +import { + ReadingModeMobileRegular, + QuestionRegular, + ArrowUpRegular, + PlayRegular, + FlowchartRegular, + RocketRegular +} from '@fluentui/react-icons'; +import { FaAws, FaGoogle, FaCloud, FaServer, faGear } from "react-icons/fa6"; +import HarvesterIcon from '@site/static/img/harvester_logo_horizontal.svg'; + +Prime is the Rancher ecosystem’s enterprise offering, with additional security, extended lifecycles, and access to Prime-exclusive documentation. Rancher Prime installation assets are hosted on a trusted SUSE registry, owned and managed by Rancher. The trusted Prime registry includes only stable releases that have been community-tested. + +Prime also offers options for production support, as well as add-ons to your subscription that tailor to your commercial needs. + +To learn more and get started with Rancher Prime, please visit [this page](https://www.rancher.com/quick-start). + +} +> + + + + + + + + + + + diff --git a/docs/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md b/docs/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md new file mode 100644 index 00000000000..b8e6a48e325 --- /dev/null +++ b/docs/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md @@ -0,0 +1,31 @@ +--- +title: Kubernetes Distributions +--- + +## K3s + +K3s is a lightweight, fully compliant Kubernetes distribution designed for a range of use cases, including edge computing, IoT, CI/CD, development and embedding Kubernetes into applications. 
It simplifies Kubernetes management by packaging the system as a single binary, using sqlite3 as the default storage, and offering a user-friendly launcher. K3s includes essential features like local storage and load balancing, Helm chart controller and the Traefik CNI. It minimizes external dependencies and provides a streamlined Kubernetes experience. K3s was donated to the CNCF as a Sandbox Project in June 2020. + +### K3s with Rancher + +- Rancher allows easy provision of K3s across a range of platforms including Amazon EC2, DigitalOcean, Azure, vSphere, or existing servers. +- Standard Rancher management of Kubernetes clusters including all outlined [cluster management capabilities](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md#cluster-management-capabilities-by-cluster-type). + + +## RKE2 + +RKE2 is a compliant Kubernetes distribution developed by Rancher. It is specifically designed for security and compliance within the U.S. Federal Government sector. + +Primary characteristics of RKE2 include: + +1. **Security and Compliance Focus**: RKE2 places a strong emphasis on security and compliance, operating under a "secure by default" framework, making it suitable for government services and highly regulated industries like finance and healthcare. +1. **CIS Kubernetes Benchmark Conformance**: RKE2 comes pre-configured to meet the CIS Kubernetes Hardening Benchmark (currently supporting v1.23 and v1.7), with minimal manual intervention required. +1. **FIPS 140-2 Compliance**: RKE2 complies with the FIPS 140-2 standard using FIPS-validated crypto modules for its components. +1. **Embedded etcd**: RKE2 defaults to using an embedded etcd as its data store. This aligns it more closely with standard Kubernetes practices, allowing better integration with other Kubernetes tools and reducing the risk of misconfiguration. +1. 
**Alignment with Upstream Kubernetes**: RKE2 aims to stay closely aligned with upstream Kubernetes, reducing the risk of non-conformance that may occur when using distributions that deviate from standard Kubernetes practices. +1. **Multiple CNI Support**: RKE2 offers support for multiple Container Network Interface (CNI) plugins, including Cilium, Calico, and Multus. This is essential for use cases such as telco distribution centers and factories with various production facilities. + +## RKE2 with Rancher + +- Rancher allows easy provision of RKE2 across a range of platforms including Amazon EC2, DigitalOcean, Azure, vSphere, or existing servers. +- Standard Rancher management of Kubernetes clusters including all outlined [cluster management capabilities](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md#cluster-management-capabilities-by-cluster-type). diff --git a/docs/integrations-in-rancher/kubewarden/kubewarden.md b/docs/integrations-in-rancher/kubewarden/kubewarden.md new file mode 100644 index 00000000000..7a6ee36b308 --- /dev/null +++ b/docs/integrations-in-rancher/kubewarden/kubewarden.md @@ -0,0 +1,35 @@ +--- +title: Advanced Policy Management with Kubewarden +--- + + + + + +Kubewarden is a Policy Engine that secures and helps manage your cluster resources. It allows for validation and mutation of resource requests via policies, including context-aware policies and verifying image signatures. It can run policies in monitor or enforcing mode and provides an overview of the state of the cluster. + +Kubewarden aims to be the Universal Policy Engine by enabling and simplifying Policy as Code. Kubewarden policies are compiled into WebAssembly: they are small (400KBs ~ 2MBs), sandboxed, secure, and portable. It aims to be universal by catering to each persona in your organization: + +- Policy User: manage and declare policies using Kubernetes Custom Resources, reuse existing policies written in Rego (OPA and Gatekeeper). 
Test the policies outside the cluster in CI/CD. +- Policy Developer: write policies in your preferred Wasm-compiling language (Rego, Go, Rust, C#, Swift, Typescript, and more to come). Reuse the ecosystem of tools, libraries, and workflows you already know. +- Policy Distributor: policies are OCI artifacts, serve them through your OCI repository and use industry standards in your infrastructure, like Software-Bill-Of-Materials and artifact signatures. +- Cluster Operator: Kubewarden is modular (OCI registry, PolicyServers, Audit Scanner, Controller). Configure your deployment to suit your needs, segregating different tenants. Get an overview of past, current, and possible violations across the cluster with the Audit Scanner and the PolicyReports. +- Kubewarden Integrator: use it as a platform to write new Kubewarden modules and custom policies. + +## Kubewarden with Rancher + +Kubewarden’s upstream Helm charts are fully integrated as Rancher Apps, providing a UI for the install options. The charts also come with defaults that respect the Rancher stack (for example: not policing Rancher system namespaces), and default PolicyServer and Policies. Users have access to all Kubewarden features and can deploy PolicyServers and Policies manually by interacting with the Kubernetes API (e.g.: using kubectl). + +Kubewarden provides a full replacement of the removed Kubernetes Pod Security Policies. Kubewarden also integrates with the new Pod Security Admission feature introduced by a recent version of Kubernetes by augmenting its security capabilities. + +## Kubewarden with Rancher Prime + +The available Rancher UI Extension for Kubewarden integrates it into the Rancher UI. The UI Extension automates the installation and configuration of the Kubewarden stack and configures access to the policies maintained by SUSE. The UI Extension provides access to a curated catalog of ready-to-use policies. Using the UI Extension, one can browse, install, and configure these policies. 
+ +The UI Extension provides an overview of the Kubewarden stack components and their behavior. This includes access to the Kubewarden metrics and trace events. An operator can understand the impact of policies on the cluster and troubleshoot issues. + +In addition, the UI Extension provides the Policy Reporter UI, which gives a visual overview of the compliance status of the Kubernetes cluster. With this UI, an operator can quickly identify all non-compliant Kubernetes resources, understand the reasons for violations and act accordingly. +All of this with the support offering of Rancher Prime. + + + \ No newline at end of file diff --git a/docs/integrations-in-rancher/longhorn/longhorn.md b/docs/integrations-in-rancher/longhorn/longhorn.md new file mode 100644 index 00000000000..e6f65680932 --- /dev/null +++ b/docs/integrations-in-rancher/longhorn/longhorn.md @@ -0,0 +1,15 @@ +--- +title: Cloud Native Storage with Longhorn +--- + + + + + +## Longhorn + +Longhorn is an official [Cloud Native Computing Foundation project (CNCF)](https://cncf.io/) project that delivers a powerful cloud-native distributed storage platform for Kubernetes that can run anywhere. When combined with Rancher, Longhorn makes the deployment of highly available persistent block storage in your Kubernetes environment easy, fast and reliable. + +## Longhorn with Rancher + +With Rancher Prime and Longhorn, users can easily deploy with 1-click via the Rancher catalog and conduct lifecycle management for managed clusters; empowering the user to install and upgrade, together with draining operation for graceful operations. Longhorn with Rancher also provides mixed cluster support with Windows, Rancher hosted images, UI Proxy access through Rancher, and Rancher monitoring with Longhorn metrics. 
diff --git a/docs/integrations-in-rancher/longhorn.md b/docs/integrations-in-rancher/longhorn/overview.md similarity index 96% rename from docs/integrations-in-rancher/longhorn.md rename to docs/integrations-in-rancher/longhorn/overview.md index d9daf5421a5..db7e4a62076 100644 --- a/docs/integrations-in-rancher/longhorn.md +++ b/docs/integrations-in-rancher/longhorn/overview.md @@ -1,9 +1,9 @@ --- -title: Longhorn - Cloud native distributed block storage for Kubernetes +title: Overview --- - + [Longhorn](https://longhorn.io/) is a lightweight, reliable, and easy-to-use distributed block storage system for Kubernetes. diff --git a/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md b/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md index 0719a2be236..9125e8536b1 100644 --- a/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md +++ b/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md @@ -118,4 +118,4 @@ For more information on configuring PrometheusRules in Rancher, see [this page.] ## Legacy UI -For information on the dashboards available in v2.2 to v2.4 of Rancher, before the introduction of the `rancher-monitoring` application, see the [Rancher v2.0—v2.4 docs](../../../versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md). +For information on the dashboards available in v2.2 to v2.4 of Rancher, before the introduction of the `rancher-monitoring` application, see the [Rancher v2.0—v2.4 docs](/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md). 
diff --git a/docs/integrations-in-rancher/neuvector/neuvector.md b/docs/integrations-in-rancher/neuvector/neuvector.md new file mode 100644 index 00000000000..4d823368dee --- /dev/null +++ b/docs/integrations-in-rancher/neuvector/neuvector.md @@ -0,0 +1,27 @@ +--- +title: Container Security with NeuVector +--- + + + + + +NeuVector is the only 100% open source, Zero Trust container security platform. Continuously scan throughout the container lifecycle. Remove security roadblocks. Bake in security policies at the start to maximize developer agility. NeuVector provides vulnerability and compliance scanning and management from build to production. The unique NeuVector run-time protection protects network connections within and ingress/egress to the cluster with a Layer7 container firewall. Additionally, NeuVector monitors process and file activity in containers and on hosts to stop unauthorized activity. + +## NeuVector with Rancher + +All NeuVector features are available through Rancher with integrated deployment and single-sign on to the NeuVector console. Rancher cluster admins are able to deploy and manage the NeuVector deployment on their clusters and easily configure NeuVector through Helm values, configMaps, custom resource definitions (CRDs) and the NeuVector console. + +With NeuVector and Rancher: + +- Deploy, manage and secure multiple clusters. +- Manage and report vulnerabilities and compliance results for Rancher workloads and nodes. + +## NeuVector Prime with Rancher Prime + +The NeuVector UI Extension for Rancher Manager is available and supported for Rancher Prime and NeuVector Prime customers. This extension provides: + +- Automated deployment of NeuVector, including the Rancher Prime NeuVector Extension dashboard. +- Access to important security information from each cluster, such as critical security events, vulnerability scan results, and ingress/egress exposures. 
+- Integrated vulnerability (CVE) and compliance scan results directly in Rancher resources such as nodes and containers/pods. +- Integrated actions such as manual triggers of scans on Rancher resources. diff --git a/docs/integrations-in-rancher/neuvector.md b/docs/integrations-in-rancher/neuvector/overview.md similarity index 98% rename from docs/integrations-in-rancher/neuvector.md rename to docs/integrations-in-rancher/neuvector/overview.md index fbc5eccec1c..e2701265fc6 100644 --- a/docs/integrations-in-rancher/neuvector.md +++ b/docs/integrations-in-rancher/neuvector/overview.md @@ -1,14 +1,14 @@ --- -title: NeuVector Integration +title: Overview --- - + ### NeuVector Integration in Rancher -[NeuVector 5.x](https://open-docs.neuvector.com/) is an open-source container-centric security platform that is integrated with Rancher. NeuVector offers real-time compliance, visibility, and protection for critical applications and data during runtime. NeuVector provides a firewall, container process/file system monitoring, security auditing with CIS benchmarks, and vulnerability scanning. For more information on Rancher security, please see the [security documentation](../pages-for-subheaders/rancher-security.md). +[NeuVector 5.x](https://open-docs.neuvector.com/) is an open-source container-centric security platform that is integrated with Rancher. NeuVector offers real-time compliance, visibility, and protection for critical applications and data during runtime. NeuVector provides a firewall, container process/file system monitoring, security auditing with CIS benchmarks, and vulnerability scanning. For more information on Rancher security, please see the [security documentation](../../pages-for-subheaders/rancher-security.md). NeuVector can be enabled through a Helm chart that may be installed either through **Apps** or through the **Cluster Tools** button in the Rancher UI. 
Once the Helm chart is installed, users can easily [deploy and manage NeuVector clusters within Rancher](https://open-docs.neuvector.com/deploying/rancher#deploy-and-manage-neuvector-through-rancher-apps-marketplace). diff --git a/docs/integrations-in-rancher/opni/opni.md b/docs/integrations-in-rancher/opni/opni.md new file mode 100644 index 00000000000..96ec3eb7336 --- /dev/null +++ b/docs/integrations-in-rancher/opni/opni.md @@ -0,0 +1,23 @@ +--- +title: Observability with Opni +--- + + + + + +Opni is a multi-cluster and multi-tenant observability platform. Purpose-built on Kubernetes, Opni simplifies the process of creating and managing backends, agents, and data related to logging, monitoring, and tracing. With built-in AIOps, Opni allows users to swiftly detect anomalous activities in their data. + +Opni components work together to provide a comprehensive observability platform. Key components include: + +- Observability Backends: Opni Logging enhances Opensearch for easy searching, visualization, and analysis of logs, traces and Kubernetes events. Opni Monitoring extends Cortex for multi-cluster, long-term storage of Prometheus metrics. +- Observability Agents: Agents are software that collects observability data (logs, metrics, traces, and events) from their host and sends it to an observability backend. The Opni agent enables collection of logs, Kubernetes events, OpenTelemetry traces, and Prometheus metrics. +- AIOps: Applies AL and machine learning to IT and observability data. Open AIOps features include log anomaly detection using pretrained models for Kubernetes control plane, Rancher and Longhorn. +- Alerting and SLOs: Triggers and reliability targets for services enables utilizing Opni data to effectively make informed decisions regarding software operations. + +## Opni with Rancher + +Opni’s Helm charts are currently maintained in a charts-specific branch of the Opni GitHub project. 
Once this branch is added as a repository in Rancher, the Opni installation can be performed through the Rancher UI. Efforts are underway now to streamline this process by including these charts directly within Rancher itself, and offering Opni as a fully integrated Rancher App. + +Opni’s log anomaly detection process includes purpose-built, pre-trained models for RKE2, K3s, Longhorn and Rancher agent logs. This advanced modeling ensures first class support for log anomaly detection for the core suite of Rancher products. + diff --git a/docs/integrations-in-rancher/rancher-desktop.md b/docs/integrations-in-rancher/rancher-desktop.md new file mode 100644 index 00000000000..a790e814c0e --- /dev/null +++ b/docs/integrations-in-rancher/rancher-desktop.md @@ -0,0 +1,34 @@ +--- +title: Kubernetes on the Desktop with Rancher Desktop +--- + + + + + + +Rancher Desktop bundles together essential tools for developing and testing cloud-native applications from your desktop. + +If you're working from your local machine on apps intended for cloud environments, you normally need a lot of preparation. You need to select a container run-time, install Kubernetes and popular utilities, and possibly set up a virtual machine. Installing components individually and getting them to work together can be a time-consuming process. + +To reduce the complexity, Rancher Desktop offers teams the following key features: + +- Simple and easy installation on macOS, Linux and Windows operating systems. +- K3s, a ready-to-use, light-weight Kubernetes distribution. +- The ability to easily switch between Kubernetes versions. +- A GUI-based cluster dashboard powered by Rancher to explore your local cluster. +- Freedom to choose your container engine: dockerd (moby) or containerd. +- Preference settings to configure the application to suit your needs. +- Bundled tools required for your container, for Kubernetes-based development, and for operation workflows. 
+- Periodic updates to keep bundled tools up to date. +- Integration with popular tools/IDEs, including VS Code and Skaffold. +- Image & Registry access control. +- Support for Docker extensions. + +Visit the [Rancher Desktop](https://rancherdesktop.io) website and read the [docs](https://docs.rancherdesktop.io/) to learn more. + +To install Rancher Desktop on your machine, refer to the [installation guide](https://docs.rancherdesktop.io/getting-started/installation). + +## Trying Rancher on Rancher Desktop + +Rancher Desktop offers the setup and tools you need to easily try out containerized, Helm-based applications. You can get started with the Rancher Kubernetes Management platform using Rancher Desktop, by following this [how-to guide](https://docs.rancherdesktop.io/how-to-guides/rancher-on-rancher-desktop). diff --git a/docs/pages-for-subheaders/about-provisioning-drivers.md b/docs/pages-for-subheaders/about-provisioning-drivers.md index 812197b3b3f..1e129210c4b 100644 --- a/docs/pages-for-subheaders/about-provisioning-drivers.md +++ b/docs/pages-for-subheaders/about-provisioning-drivers.md @@ -48,4 +48,4 @@ Rancher supports several major cloud providers, but by default, these node drive There are several other node drivers that are disabled by default, but are packaged in Rancher: -* [Harvester](../integrations-in-rancher/harvester.md#harvester-node-driver/), available in Rancher v2.6.1 +* [Harvester](../integrations-in-rancher/harvester/overview.md#harvester-node-driver/), available as of Rancher v2.6.1 diff --git a/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md b/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md index 6bda26af36e..58fcb58c1cb 100644 --- a/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -50,7 +50,7 @@ Longhorn is free, open source software. 
Originally developed by Rancher Labs, it If you have a pool of block storage, Longhorn can help you provide persistent storage to your Kubernetes cluster without relying on cloud providers. For more information about Longhorn features, refer to the [documentation.](https://longhorn.io/docs/latest/what-is-longhorn/) -Rancher v2.5 simplified the process of installing Longhorn on a Rancher-managed cluster. For more information, see [this page.](../integrations-in-rancher/longhorn.md) +Rancher v2.5 simplified the process of installing Longhorn on a Rancher-managed cluster. For more information, see [Cloud Native Storage with Longhorn](../integrations-in-rancher/longhorn/longhorn.md). ### Provisioning Storage Examples diff --git a/docs/pages-for-subheaders/rancher-security.md b/docs/pages-for-subheaders/rancher-security.md index 67c496fe24d..81c0c40da24 100644 --- a/docs/pages-for-subheaders/rancher-security.md +++ b/docs/pages-for-subheaders/rancher-security.md @@ -29,7 +29,7 @@ On this page, we provide security related documentation along with resources to ### NeuVector Integration with Rancher -NeuVector is an open-source, container-focused security application that is now integrated into Rancher. NeuVector provides production security, DevOps vulnerability protection, and a container firewall, et al. Please see the [Rancher docs](../integrations-in-rancher/neuvector.md) and the [NeuVector docs](https://open-docs.neuvector.com/) for more information. +NeuVector is an open-source, container-focused security application that is now integrated into Rancher. NeuVector provides production security, DevOps vulnerability protection, and a container firewall, et al. Please see the [Rancher docs](../integrations-in-rancher/neuvector/neuvector.md) and the [NeuVector docs](https://open-docs.neuvector.com/) for more information. 
### Running a CIS Security Scan on a Kubernetes Cluster diff --git a/docs/reference-guides/rancher-security/rancher-webhook-hardening.md b/docs/reference-guides/rancher-security/rancher-webhook-hardening.md new file mode 100644 index 00000000000..0362deecc5c --- /dev/null +++ b/docs/reference-guides/rancher-security/rancher-webhook-hardening.md @@ -0,0 +1,133 @@ +--- +title: Hardening the Rancher Webhook +--- + +Rancher Webhook is an important component within Rancher, playing a role in enforcing security requirements for Rancher and its workloads. To decrease its attack surface, access to it should be limited to the only valid caller it has: the Kubernetes API server. This can be done by using network policies and authentication independently or in conjunction with each other to harden the webhook against attacks. + +## Block External Traffic Using Network Policies + +The webhook is only expected to accept requests from the Kubernetes API server. By default, however, the webhook can accept traffic from any source. If you are using a CNI that supports Network Policies, you can create a policy that blocks traffic that doesn't originate from the API server. + +The built-in NetworkPolicy resource in Kubernetes can't block or admit traffic from the cluster hosts, and the `kube-apiserver` process is always running on the host network. Therefore, you must use the advanced network policy resources from the CNI in use. Examples for Calico and Cilium follow. Consult the documentation for your CNI for more details. + +### Calico + +Use the NetworkPolicy resource in the `crd.projectcalico.org/v1` API group. 
Use the selector `app == 'rancher-webhook'` to create a rule for the webhook, and set the CIDR of the control plane hosts as the ingress source: + +```yaml +apiVersion: crd.projectcalico.org/v1 +kind: NetworkPolicy +metadata: + name: allow-k8s + namespace: cattle-system +spec: + selector: app == 'rancher-webhook' + types: + - Ingress + ingress: + - action: Allow + protocol: TCP + source: + nets: + - 192.168.42.0/24 # CIDR of the control plane host. May list more than 1 if the hosts are in different subnets. + destination: + selector: + app == 'rancher-webhook' +``` + +### Cilium + +Use the CiliumNetworkPolicy resource in the `cilium.io/v2` API group. Add the `host` and `remote-node` keys to the `fromEntities` ingress rule. This blocks in-cluster and external traffic while allowing traffic from the hosts. + +```yaml +apiVersion: "cilium.io/v2" +kind: CiliumNetworkPolicy +metadata: + name: allow-k8s + namespace: cattle-system +spec: + endpointSelector: + matchLabels: + app: rancher-webhook + ingress: + - fromEntities: + - host + - remote-node +``` + +## Require the Kubernetes API Server to Authenticate to the Webhook + +The webhook should only accept requests from the Kubernetes API server. By default, the webhook doesn't require clients to authenticate to it. It will accept any request. You can configure the webhook to require credentials so that only the API server can access it. More information can be found in the [Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#authenticate-apiservers). + +1. 
Configure the API server to present a client certificate to the webhook, pointing to an AdmissionConfiguration file to configure the ValidatingAdmissionWebhook and MutatingAdmissionWebhook plugins: + + ```yaml + # /etc/rancher/admission/admission.yaml + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: ValidatingAdmissionWebhook + configuration: + apiVersion: apiserver.config.k8s.io/v1 + kind: WebhookAdmissionConfiguration + kubeConfigFile: "/etc/rancher/admission/kubeconfig" + - name: MutatingAdmissionWebhook + configuration: + apiVersion: apiserver.config.k8s.io/v1 + kind: WebhookAdmissionConfiguration + kubeConfigFile: "/etc/rancher/admission/kubeconfig" + ``` + + This is also the same config file where other admission plugins are configured, such as PodSecurity. If your distro or your setup uses additional admission plugins, configure those as well. For example, add [RKE2's PodSecurity configuration](https://docs.rke2.io/security/pod_security_standards) to this file. + +2. Create the kubeconfig file that the admission plugins refer to. Rancher Webhook only supports client certificate authentication, so generate a TLS key pair, and set the kubeconfig to use either `client-certificate` and `client-key` or `client-certificate-data` and `client-key-data`. For example: + + ```yaml + # /etc/rancher/admission/kubeconfig + apiVersion: v1 + kind: Config + users: + - name: 'rancher-webhook.cattle-system.svc' + user: + client-certificate: /path/to/client/cert + client-key: /path/to/client/key + ``` + +3. Start the kube-apiserver binary with the flag `--admission-control-config-file` pointing to your AdmissionConfiguration file. The way to do this varies by distro, and it isn't supported universally, such as in hosted Kubernetes providers. Consult the documentation for your Kubernetes distribution. 
+ + For RKE2, `rke2-server` can be started with a config file like so: + + ```yaml + # /etc/rancher/rke2/config.yaml + kube-apiserver-arg: + - admission-control-config-file=/etc/rancher/admission/admission.yaml + kube-apiserver-extra-mount: + - /etc/rancher/admission:/etc/rancher/admission:ro + ``` + + :::danger + Some distros set this flag by default. If your distro provisions its own AdmissionConfiguration, you must include it in your custom admission control config file. For example, RKE2 installs an AdmissionConfiguration file at `/etc/rancher/rke2/rke2-pss.yaml`, which configures the PodSecurity admission plugin. Setting `admission-control-config-file` in config.yaml will override this essential security setting. To include both plugins, consult [the Default Pod Security Standards documentation](https://docs.rke2.io/security/pod_security_standards) and copy the appropriate plugin configuration to your admission.yaml. + ::: + +4. If you're using Rancher to provision your cluster using existing nodes, create these files on the node before you provision them. + + If you're using Rancher to provision your cluster on new nodes, allow the provisioning to complete, then use the provided SSH key and IP address to connect to the nodes, and place the RKE2 config file in the `/etc/rancher/rke2/config.yaml.d/` directory. + +5. After the cluster is configured with these credentials, configure the Rancher cluster agent to enable authentication in the webhook. Create a file containing these chart values: + + ```yaml + # values.yaml + auth: + clientCA: + allowedCNs: + - + - + ``` + +6. Create a configmap in the `cattle-system` namespace on the provisioned cluster with these values: + + ``` + kubectl --namespace cattle-system create configmap rancher-config --from-file=rancher-webhook=values.yaml + ``` + + The webhook will restart with these values. 
diff --git a/docusaurus.config.js b/docusaurus.config.js index 6db813113de..f398240e009 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -646,15 +646,15 @@ module.exports = { from: '/explanations/integrations-in-rancher/cis-scans/custom-benchmark' }, { - to: '/integrations-in-rancher/fleet-gitops-at-scale/architecture', + to: '/integrations-in-rancher/fleet/architecture', from: '/explanations/integrations-in-rancher/fleet-gitops-at-scale/architecture' }, { - to: '/integrations-in-rancher/fleet-gitops-at-scale/windows-support', + to: '/integrations-in-rancher/fleet/windows-support', from: '/explanations/integrations-in-rancher/fleet-gitops-at-scale/windows-support' }, { - to: '/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy', + to: '/integrations-in-rancher/fleet/use-fleet-behind-a-proxy', from: '/explanations/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy' }, { diff --git a/sidebars.js b/sidebars.js index ce3687e759b..8aed9ca6ece 100644 --- a/sidebars.js +++ b/sidebars.js @@ -1118,14 +1118,72 @@ const sidebars = { "reference-guides/rancher-security/rancher-security-best-practices", "reference-guides/rancher-security/security-advisories-and-cves", "reference-guides/rancher-security/psa-restricted-exemptions", + "reference-guides/rancher-security/rancher-webhook-hardening" ], } ] }, { - type: 'category', - label: 'Integrations in Rancher', - items: [ + "type": "category", + "label": "Integrations in Rancher", + "link": { + "type": "doc", + "id": "integrations-in-rancher/integrations-in-rancher" + }, + "items": [ + "integrations-in-rancher/kubernetes-distributions/kubernetes-distributions", + { + "type": "category", + "label": "Virtualization on Kubernetes with Harvester", + "link": { + "type": "doc", + "id": "integrations-in-rancher/harvester/harvester" + }, + "items": [ + "integrations-in-rancher/harvester/overview" + ] + }, + { + "type": "category", + "label": "Cloud Native Storage with Longhorn", + 
"link": { + "type": "doc", + "id": "integrations-in-rancher/longhorn/longhorn" + }, + "items": [ + "integrations-in-rancher/longhorn/overview" + ] + }, + { + "type": "category", + "label": "Container Security with Neuvector", + "link": { + "type": "doc", + "id": "integrations-in-rancher/neuvector/neuvector" + }, + "items": [ + "integrations-in-rancher/neuvector/overview" + ] + }, + "integrations-in-rancher/kubewarden/kubewarden", + "integrations-in-rancher/elemental/elemental", + "integrations-in-rancher/opni/opni", + { + "type": "category", + "label": "Continuous Delivery with Fleet", + "link": { + "type": "doc", + "id": "integrations-in-rancher/fleet/fleet" + }, + "items": [ + "integrations-in-rancher/fleet/overview", + "integrations-in-rancher/fleet/architecture", + "integrations-in-rancher/fleet/windows-support", + "integrations-in-rancher/fleet/use-fleet-behind-a-proxy" + ] + }, + "integrations-in-rancher/rancher-desktop", + "integrations-in-rancher/epinio/epinio", { type: 'category', label: 'Cloud Marketplace Integration', @@ -1165,20 +1223,6 @@ const sidebars = { "integrations-in-rancher/cis-scans/custom-benchmark", ], }, - { - type: 'category', - label: 'Continuous Delivery with Fleet', - link: { - type: 'doc', - id: "pages-for-subheaders/fleet-gitops-at-scale", - }, - items: [ - "integrations-in-rancher/fleet-gitops-at-scale/architecture", - "integrations-in-rancher/fleet-gitops-at-scale/windows-support", - "integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy", - ] - }, - "integrations-in-rancher/harvester", { type: 'category', label: 'Istio', @@ -1206,7 +1250,6 @@ const sidebars = { } ] }, - "integrations-in-rancher/longhorn", { type: 'category', label: 'Logging', @@ -1248,10 +1291,7 @@ const sidebars = { "integrations-in-rancher/monitoring-and-alerting/promql-expressions", ] }, - "integrations-in-rancher/neuvector", - "integrations-in-rancher/opa-gatekeeper", - "integrations-in-rancher/rancher-extensions", ] }, @@ -1305,6 +1345,21 @@ 
const sidebars = { } ] }, + { + "type": "category", + "label": "Rancher Kubernetes API", + "items": [ + "api/quickstart", + { + "type": "category", + "label": "Example Workflows", + "items": [ + "api/workflows/projects" + ] + }, + "api/api-reference" + ] + }, "contribute-to-rancher", ] }