From 46a972fa3eb5cbea33ca001f6c2c1df432449f47 Mon Sep 17 00:00:00 2001 From: Catherine Luse Date: Tue, 11 Feb 2020 14:30:47 -0700 Subject: [PATCH] Remove spaces from links --- content/_index.html | 10 ++--- content/k3s/latest/en/architecture/_index.md | 4 +- content/k3s/latest/en/installation/_index.md | 8 ++-- .../en/installation/datastore/_index.md | 2 +- .../k3s/latest/en/installation/ha/_index.md | 6 +-- .../en/installation/install-options/_index.md | 2 +- .../en/installation/network-options/_index.md | 2 +- content/k3s/latest/en/networking/_index.md | 7 ++-- content/os/v1.x/en/_index.md | 2 +- content/os/v1.x/en/about/_index.md | 2 +- .../running-rancher-on-rancherOS/_index.md | 4 +- .../v1.x/en/installation/amazon-ecs/_index.md | 6 +-- .../built-in-system-services/_index.md | 18 ++++----- .../boot-process/cloud-init/_index.md | 4 +- .../en/installation/configuration/_index.md | 2 +- .../adding-kernel-parameters/_index.md | 2 +- .../airgap-configuration/_index.md | 8 ++-- .../configuration/date-and-timezone/_index.md | 2 +- .../configuration/docker/_index.md | 6 +-- .../configuration/hostname/_index.md | 2 +- .../private-registries/_index.md | 4 +- .../configuration/running-commands/_index.md | 2 +- .../configuration/ssh-keys/_index.md | 2 +- .../switching-consoles/_index.md | 8 ++-- .../switching-docker-versions/_index.md | 4 +- .../configuration/users/_index.md | 2 +- .../custom-builds/custom-console/_index.md | 8 ++-- .../custom-builds/custom-kernels/_index.md | 4 +- .../custom-rancheros-iso/_index.md | 6 +-- .../installation/running-rancheros/_index.md | 26 ++++++------ .../running-rancheros/cloud/aliyun/_index.md | 4 +- .../running-rancheros/cloud/aws/_index.md | 2 +- .../running-rancheros/cloud/do/_index.md | 2 +- .../running-rancheros/cloud/gce/_index.md | 16 ++++---- .../cloud/openstack/_index.md | 2 +- .../server/install-to-disk/_index.md | 6 +-- .../running-rancheros/server/pxe/_index.md | 4 +- .../server/raspberry-pi/_index.md | 2 +- 
.../workstation/boot-from-iso/_index.md | 4 +- .../workstation/docker-machine/_index.md | 4 +- .../storage/additional-mounts/_index.md | 2 +- .../storage/state-partition/_index.md | 2 +- .../custom-system-services/_index.md | 2 +- content/os/v1.x/en/overview/_index.md | 6 +-- .../os/v1.x/en/quick-start-guide/_index.md | 4 +- content/os/v1.x/en/upgrading/_index.md | 6 +-- .../rancher/v2.x/en/admin-settings/_index.md | 14 +++---- .../authentication/ad/_index.md | 6 +-- .../authentication/azure-ad/_index.md | 40 ++++++++++++++----- .../authentication/freeipa/_index.md | 2 +- .../authentication/github/_index.md | 2 +- .../authentication/keycloak/_index.md | 2 +- .../authentication/microsoft-adfs/_index.md | 6 +-- .../microsoft-adfs-setup/_index.md | 2 +- .../rancher-adfs-setup/_index.md | 2 +- .../authentication/openldap/_index.md | 8 ++-- .../authentication/user-groups/_index.md | 4 +- .../v2.x/en/admin-settings/drivers/_index.md | 24 +++++------ .../drivers/cluster-drivers/_index.md | 6 +-- .../drivers/node-drivers/_index.md | 4 +- .../pod-security-policies/_index.md | 8 ++-- .../v2.x/en/admin-settings/rbac/_index.md | 6 +-- .../rbac/cluster-project-roles/_index.md | 4 +- .../rbac/default-custom-roles/_index.md | 6 +-- .../rbac/locked-roles/_index.md | 2 +- content/rancher/v2.x/en/api/_index.md | 4 +- content/rancher/v2.x/en/backups/_index.md | 8 ++-- .../rancher/v2.x/en/backups/backups/_index.md | 2 +- .../backups/single-node-backups/_index.md | 4 +- .../v2.x/en/backups/restorations/_index.md | 6 +-- .../restorations/ha-restoration/_index.md | 16 ++++---- .../single-node-restoration/_index.md | 8 ++-- .../rancher/v2.x/en/best-practices/_index.md | 8 ++-- .../best-practices/deployment-types/_index.md | 4 +- .../en/best-practices/management/_index.md | 10 ++--- content/rancher/v2.x/en/catalog/_index.md | 16 ++++---- .../rancher/v2.x/en/catalog/apps/_index.md | 8 ++-- .../v2.x/en/catalog/built-in/_index.md | 10 ++--- .../rancher/v2.x/en/catalog/custom/_index.md | 8 
++-- .../v2.x/en/catalog/custom/adding/_index.md | 28 ++++++------- .../v2.x/en/catalog/custom/creating/_index.md | 4 +- .../v2.x/en/catalog/globaldns/_index.md | 14 +++---- .../en/catalog/multi-cluster-apps/_index.md | 14 +++---- content/rancher/v2.x/en/cli/_index.md | 24 +++++------ .../rancher/v2.x/en/cluster-admin/_index.md | 28 ++++++------- .../cluster-admin/backing-up-etcd/_index.md | 8 ++-- .../cleaning-cluster-nodes/_index.md | 12 +++--- .../cluster-admin/cloning-clusters/_index.md | 10 ++--- .../en/cluster-admin/cluster-access/_index.md | 8 ++-- .../cluster-access/cluster-members/_index.md | 14 +++---- .../cluster-admin/editing-clusters/_index.md | 20 +++++----- .../v2.x/en/cluster-admin/nodes/_index.md | 32 +++++++-------- .../pod-security-policy/_index.md | 8 ++-- .../en/cluster-admin/restoring-etcd/_index.md | 12 +++--- .../en/cluster-admin/tools/alerts/_index.md | 16 ++++---- .../tools/alerts/default-alerts/_index.md | 2 +- .../en/cluster-admin/tools/logging/_index.md | 16 ++++---- .../tools/logging/splunk/_index.md | 4 +- .../cluster-admin/tools/monitoring/_index.md | 16 ++++---- .../monitoring/cluster-metrics/_index.md | 16 ++++---- .../tools/monitoring/prometheus/_index.md | 8 ++-- .../monitoring/viewing-metrics/_index.md | 8 ++-- .../cluster-admin/tools/notifiers/_index.md | 4 +- .../upgrading-kubernetes/_index.md | 4 +- .../attaching-existing-storage/_index.md | 2 +- .../volumes-and-storage/examples/_index.md | 4 +- .../examples/nfs/_index.md | 2 +- .../examples/vsphere/_index.md | 8 ++-- .../how-storage-works/_index.md | 2 +- .../iscsi-volumes/_index.md | 2 +- .../provisioning-new-storage/_index.md | 2 +- .../hosted-kubernetes-clusters/_index.md | 12 +++--- .../hosted-kubernetes-clusters/ack/_index.md | 2 +- .../hosted-kubernetes-clusters/cce/_index.md | 2 +- .../hosted-kubernetes-clusters/eks/_index.md | 2 +- .../hosted-kubernetes-clusters/tke/_index.md | 2 +- .../production/nodes-and-roles/_index.md | 2 +- 
.../rke-clusters/custom-nodes/_index.md | 8 ++-- .../rke-clusters/node-pools/_index.md | 6 +-- .../rke-clusters/node-pools/ec2/_index.md | 4 +- .../provisioning-vsphere-clusters/_index.md | 2 +- .../node-template-reference/_index.md | 6 +-- .../rke-clusters/options/_index.md | 28 ++++++------- .../options/pod-security-policies/_index.md | 2 +- .../rke-clusters/rancher-agents/_index.md | 4 +- .../rke-clusters/windows-clusters/_index.md | 22 +++++----- .../docs-for-2.1-and-2.2/_index.md | 24 +++++------ .../rancher/v2.x/en/contributing/_index.md | 2 +- .../rancher/v2.x/en/faq/networking/_index.md | 2 +- .../en/faq/networking/cni-providers/_index.md | 20 +++++----- .../rancher/v2.x/en/faq/security/_index.md | 4 +- .../rancher/v2.x/en/faq/technical/_index.md | 8 ++-- .../rancher/v2.x/en/k8s-in-rancher/_index.md | 38 +++++++++--------- .../en/k8s-in-rancher/certificates/_index.md | 4 +- .../en/k8s-in-rancher/configmaps/_index.md | 4 +- .../horitzontal-pod-autoscaler/_index.md | 10 ++--- .../hpa-background/_index.md | 2 +- .../manage-hpa-with-kubectl/_index.md | 4 +- .../manage-hpa-with-rancher-ui/_index.md | 4 +- .../testing-hpa/_index.md | 2 +- .../load-balancers-and-ingress/_index.md | 16 ++++---- .../ingress/_index.md | 6 +-- .../load-balancers/_index.md | 6 +-- .../en/k8s-in-rancher/pipelines/_index.md | 18 ++++----- .../pipelines/example-repos/_index.md | 4 +- .../en/k8s-in-rancher/registries/_index.md | 2 +- .../v2.x/en/k8s-in-rancher/secrets/_index.md | 4 +- .../service-discovery/_index.md | 2 +- .../en/k8s-in-rancher/workloads/_index.md | 6 +-- .../workloads/deploy-workloads/_index.md | 12 +++--- content/rancher/v2.x/en/overview/_index.md | 4 +- .../architecture-recommendations/_index.md | 4 +- .../v2.x/en/overview/architecture/_index.md | 4 +- .../rancher/v2.x/en/project-admin/_index.md | 14 +++---- .../en/project-admin/namespaces/_index.md | 24 +++++------ .../v2.x/en/project-admin/pipelines/_index.md | 18 ++++----- .../pipelines/docs-for-v2.0.x/_index.md | 2 
+- .../pod-security-policies/_index.md | 6 +-- .../project-admin/project-members/_index.md | 10 ++--- .../project-admin/resource-quotas/_index.md | 8 ++-- .../override-container-default/_index.md | 2 +- .../override-namespace-default/_index.md | 10 ++--- .../quotas-for-projects/_index.md | 8 ++-- .../en/project-admin/tools/alerts/_index.md | 10 ++--- .../en/project-admin/tools/logging/_index.md | 14 +++---- .../project-admin/tools/monitoring/_index.md | 18 ++++----- .../v2.x/en/quick-start-guide/_index.md | 8 ++-- .../deployment/amazon-aws-qs/_index.md | 2 +- .../deployment/digital-ocean-qs/_index.md | 2 +- .../quickstart-manual-setup/_index.md | 4 +- .../deployment/quickstart-vagrant/_index.md | 2 +- .../_index.md | 6 +-- .../_index.md | 12 +++--- content/rancher/v2.x/en/security/_index.md | 2 +- .../v2.x/en/security/hardening-2.1/_index.md | 2 +- .../v2.x/en/security/hardening-2.2/_index.md | 2 +- .../en/security/hardening-2.3.3/_index.md | 4 +- .../v2.x/en/security/hardening-2.3/_index.md | 4 +- .../rancher/v2.x/en/system-tools/_index.md | 8 ++-- .../rancher/v2.x/en/troubleshooting/_index.md | 16 ++++---- .../v2.x/en/troubleshooting/dns/_index.md | 4 +- .../kubernetes-components/_index.md | 4 +- .../controlplane/_index.md | 2 +- .../kubernetes-resources/_index.md | 2 +- .../en/troubleshooting/networking/_index.md | 2 +- content/rancher/v2.x/en/upgrades/_index.md | 4 +- .../v2.x/en/upgrades/rollbacks/_index.md | 2 +- .../rollbacks/ha-server-rollbacks/_index.md | 2 +- .../rollbacks/single-node-rollbacks/_index.md | 6 +-- .../v2.x/en/upgrades/upgrades/_index.md | 10 ++--- .../v2.x/en/upgrades/upgrades/ha/_index.md | 14 +++---- .../en/upgrades/upgrades/ha/helm2/_index.md | 14 +++---- .../migrating-from-rke-add-on/_index.md | 6 +-- .../upgrades/namespace-migration/_index.md | 8 ++-- .../upgrades/upgrades/single-node/_index.md | 22 +++++----- .../rancher/v2.x/en/user-settings/_index.md | 10 ++--- .../v2.x/en/user-settings/api-keys/_index.md | 4 +- 
.../user-settings/cloud-credentials/_index.md | 12 +++--- .../en/user-settings/node-templates/_index.md | 10 ++--- .../rancher/v2.x/en/v1.6-migration/_index.md | 18 ++++----- .../discover-services/_index.md | 12 +++--- .../v1.6-migration/expose-services/_index.md | 8 ++-- .../en/v1.6-migration/get-started/_index.md | 30 +++++++------- .../en/v1.6-migration/kub-intro/_index.md | 2 +- .../v1.6-migration/load-balancing/_index.md | 34 ++++++++-------- .../en/v1.6-migration/monitor-apps/_index.md | 16 ++++---- .../run-migration-tool/_index.md | 38 +++++++++--------- .../schedule-workloads/_index.md | 32 +++++++-------- content/rke/latest/en/cert-mgmt/_index.md | 4 +- .../rke/latest/en/config-options/_index.md | 38 +++++++++--------- .../en/config-options/add-ons/_index.md | 12 +++--- .../en/config-options/add-ons/dns/_index.md | 4 +- .../add-ons/ingress-controllers/_index.md | 2 +- .../add-ons/metrics-server/_index.md | 2 +- .../add-ons/network-plugins/_index.md | 2 +- .../add-ons/user-defined-add-ons/_index.md | 2 +- .../en/config-options/bastion-host/_index.md | 2 +- .../config-options/cloud-providers/_index.md | 10 ++--- .../vsphere/troubleshooting/_index.md | 4 +- .../latest/en/config-options/nodes/_index.md | 6 +-- .../private-registries/_index.md | 8 ++-- .../en/config-options/services/_index.md | 12 +++--- .../services/external-etcd/_index.md | 2 +- .../en/config-options/system-images/_index.md | 2 +- .../rke/latest/en/etcd-snapshots/_index.md | 2 +- .../one-time-snapshots/_index.md | 8 ++-- .../restoring-from-backup/_index.md | 10 ++--- content/rke/latest/en/example-yamls/_index.md | 4 +- content/rke/latest/en/installation/_index.md | 18 ++++----- .../latest/en/installation/certs/_index.md | 4 +- .../rke/latest/en/managing-clusters/_index.md | 4 +- content/rke/latest/en/os/_index.md | 4 +- .../rke/latest/en/troubleshooting/_index.md | 4 +- .../provisioning-errors/_index.md | 2 +- content/rke/latest/en/upgrades/_index.md | 10 ++--- 235 files changed, 955 
insertions(+), 938 deletions(-) diff --git a/content/_index.html b/content/_index.html index 583a56f29b9..fd31063084b 100644 --- a/content/_index.html +++ b/content/_index.html @@ -69,7 +69,7 @@
- + @@ -110,7 +110,7 @@

Rancher manages all of your Kubernetes clusters everywhere, unifies them under centralized RBAC, monitors them and lets you easily deploy and manage workloads through an intuitive user interface.

- + @@ -164,7 +164,7 @@

RancherOS is the lightest, easiest way to run Docker in production. Engineered from the ground up for security and speed, it runs all system services and user workloads within Docker containers.

- + @@ -191,7 +191,7 @@

Rancher Kubernetes Engine (RKE) is an extremely simple, lightning fast Kubernetes installer that works everywhere.

- + @@ -218,7 +218,7 @@

Lightweight Kubernetes. Easy to install, half the memory, all in a binary less than 40mb.

- + diff --git a/content/k3s/latest/en/architecture/_index.md b/content/k3s/latest/en/architecture/_index.md index 0b04ddbfd8c..6b116eb62e2 100644 --- a/content/k3s/latest/en/architecture/_index.md +++ b/content/k3s/latest/en/architecture/_index.md @@ -33,7 +33,7 @@ Single server clusters can meet a variety of use cases, but for environments whe * An **external datastore** (as opposed to the embedded SQLite datastore used in single-server setups)
K3s Architecture with a High-availability Server
-![Architecture]({{< baseurl >}}/img/rancher/k3s-architecture-ha-server.png) +![Architecture]({{}}/img/rancher/k3s-architecture-ha-server.png) ### Fixed Registration Address for Agent Nodes @@ -41,7 +41,7 @@ In the high-availability server configuration, each node must also register with After registration, the agent nodes establish a connection directly to one of the server nodes. -![k3s HA]({{< baseurl >}}/img/k3s/k3s-production-setup.svg) +![k3s HA]({{}}/img/k3s/k3s-production-setup.svg) # How Agent Node Registration Works diff --git a/content/k3s/latest/en/installation/_index.md b/content/k3s/latest/en/installation/_index.md index 68c2d7ffa8b..b141bcce42b 100644 --- a/content/k3s/latest/en/installation/_index.md +++ b/content/k3s/latest/en/installation/_index.md @@ -5,13 +5,13 @@ weight: 20 This section contains instructions for installing K3s in various environments. Please ensure you have met the [Installation Requirements]({{< baseurl >}}/k3s/latest/en/installation/installation-requirements/) before you begin installing K3s. -[Installation and Configuration Options]({{< baseurl >}}/k3s/latest/en/installation/install-options/) provides guidance on the options available to you when installing K3s. +[Installation and Configuration Options]({{}}/k3s/latest/en/installation/install-options/) provides guidance on the options available to you when installing K3s. -[High Availability with an External DB]({{< baseurl >}}/k3s/latest/en/installation/ha/) details how to set up an HA K3s cluster backed by an external datastore such as MySQL, PostgreSQL, or etcd. +[High Availability with an External DB]({{}}/k3s/latest/en/installation/ha/) details how to set up an HA K3s cluster backed by an external datastore such as MySQL, PostgreSQL, or etcd. -[High Availability with Embedded DB (Experimental)]({{< baseurl >}}/k3s/latest/en/installation/ha-embedded/) details how to set up an HA K3s cluster that leverages a built-in distributed database. 
+[High Availability with Embedded DB (Experimental)]({{}}/k3s/latest/en/installation/ha-embedded/) details how to set up an HA K3s cluster that leverages a built-in distributed database. -[Air-Gap Installation]({{< baseurl >}}/k3s/latest/en/installation/airgap/) details how to set up K3s in environments that do not have direct access to the Internet. +[Air-Gap Installation]({{}}/k3s/latest/en/installation/airgap/) details how to set up K3s in environments that do not have direct access to the Internet. ### Uninstalling diff --git a/content/k3s/latest/en/installation/datastore/_index.md b/content/k3s/latest/en/installation/datastore/_index.md index 85bb151781b..9d04be68d54 100644 --- a/content/k3s/latest/en/installation/datastore/_index.md +++ b/content/k3s/latest/en/installation/datastore/_index.md @@ -95,4 +95,4 @@ k3s server ``` ### Embedded DQLite for HA (Experimental) -K3s's use of DQLite is similar to its use of SQLite. It is simple to set up and manage. As such, there is no external configuration or additional steps to take in order to use this option. Please see [High Availability with Embedded DB (Experimental)]({{< baseurl >}}/k3s/latest/en/installation/ha-embedded/) for instructions on how to run with this option. +K3s's use of DQLite is similar to its use of SQLite. It is simple to set up and manage. As such, there is no external configuration or additional steps to take in order to use this option. Please see [High Availability with Embedded DB (Experimental)]({{}}/k3s/latest/en/installation/ha-embedded/) for instructions on how to run with this option. diff --git a/content/k3s/latest/en/installation/ha/_index.md b/content/k3s/latest/en/installation/ha/_index.md index eb945f74a51..f7310184015 100644 --- a/content/k3s/latest/en/installation/ha/_index.md +++ b/content/k3s/latest/en/installation/ha/_index.md @@ -28,12 +28,10 @@ Setting up an HA cluster requires the following steps: 4. [Join agent nodes](#4-optional-join-agent-nodes) ### 1. 
Create an External Datastore - -You will first need to create an external datastore for the cluster. See the [Cluster Datastore Options]({{< baseurl >}}/k3s/latest/en/installation/datastore/) documentation for more details. +You will first need to create an external datastore for the cluster. See the [Cluster Datastore Options]({{}}/k3s/latest/en/installation/datastore/) documentation for more details. ### 2. Launch Server Nodes - -K3s requires two or more server nodes for this HA configuration. See the [Installation Requirements]({{< baseurl >}}/k3s/latest/en/installation/installation-requirements/) for minimum machine requirements. +K3s requires two or more server nodes for this HA configuration. See the [Installation Requirements]({{}}/k3s/latest/en/installation/node-requirements/) guide for minimum machine requirements. When running the `k3s server` command on these nodes, you must set the `datastore-endpoint` parameter so that K3s knows how to connect to the external datastore. diff --git a/content/k3s/latest/en/installation/install-options/_index.md b/content/k3s/latest/en/installation/install-options/_index.md index f5054869d74..e2bc4a2dbb2 100644 --- a/content/k3s/latest/en/installation/install-options/_index.md +++ b/content/k3s/latest/en/installation/install-options/_index.md @@ -14,7 +14,7 @@ For more advanced options, refer to [this page.]({{}}/k3s/latest/en/adv # Installation Script Options -As mentioned in the [Quick-Start Guide]({{< baseurl >}}/k3s/latest/en/quick-start/), you can use the installation script available at https://get.k3s.io to install K3s as a service on systemd and openrc based systems. +As mentioned in the [Quick-Start Guide]({{}}/k3s/latest/en/quick-start/), you can use the installation script available at https://get.k3s.io to install K3s as a service on systemd and openrc based systems. 
The simplest form of this command is as follows: ```sh diff --git a/content/k3s/latest/en/installation/network-options/_index.md b/content/k3s/latest/en/installation/network-options/_index.md index c87b2783831..97873e4151b 100644 --- a/content/k3s/latest/en/installation/network-options/_index.md +++ b/content/k3s/latest/en/installation/network-options/_index.md @@ -3,7 +3,7 @@ title: "Network Options" weight: 25 --- -> **Note:** Please reference the [Networking]({{< baseurl >}}/k3s/latest/en/networking) page for information about CoreDNS, Traefik, and the Service LB. +> **Note:** Please reference the [Networking]({{}}/k3s/latest/en/networking) page for information about CoreDNS, Traefik, and the Service LB. By default, K3s will run with flannel as the CNI, using VXLAN as the default backend. To change the CNI, refer to the section on configuring a [custom CNI](#custom-cni). To change the flannel backend, refer to the flannel options section. diff --git a/content/k3s/latest/en/networking/_index.md b/content/k3s/latest/en/networking/_index.md index 3f1214fb555..3689cdbd759 100644 --- a/content/k3s/latest/en/networking/_index.md +++ b/content/k3s/latest/en/networking/_index.md @@ -3,12 +3,11 @@ title: "Networking" weight: 35 --- -> **Note:** CNI options are covered in detail on the [Installation Network Options]({{< baseurl >}}/k3s/latest/en/installation/network-options/) page. Please reference that page for details on Flannel and the various flannel backend options or how to set up your own CNI. +>**Note:** CNI options are covered in detail on the [Installation Network Options]({{}}/k3s/latest/en/installation/network-options/) page. Please reference that page for details on Flannel and the various flannel backend options or how to set up your own CNI. Open Ports ---------- - -Please reference the [Installation Requirements]({{< baseurl >}}/k3s/latest/en/installation/installation-requirements/#networking) page for port information. 
+Please reference the [Installation Requirements]({{}}/k3s/latest/en/installation/node-requirements/#networking) page for port information. CoreDNS ------- @@ -22,7 +21,7 @@ Traefik Ingress Controller [Traefik](https://traefik.io/) is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease. It simplifies networking complexity while designing, deploying, and running applications. -Traefik is deployed by default when starting the server. For more information see [Auto Deploying Manifests]({{< baseurl >}}/k3s/latest/en/advanced/#auto-deploying-manifests). The default config file is found in `/var/lib/rancher/k3s/server/manifests/traefik.yaml` and any changes made to this file will automatically be deployed to Kubernetes in a manner similar to `kubectl apply`. +Traefik is deployed by default when starting the server. For more information see [Auto Deploying Manifests]({{}}/k3s/latest/en/advanced/#auto-deploying-manifests). The default config file is found in `/var/lib/rancher/k3s/server/manifests/traefik.yaml` and any changes made to this file will automatically be deployed to Kubernetes in a manner similar to `kubectl apply`. The Traefik ingress controller will use ports 80, 443, and 8080 on the host (i.e. these will not be usable for HostPort or NodePort). diff --git a/content/os/v1.x/en/_index.md b/content/os/v1.x/en/_index.md index 1fd27ba96da..585f581fcf7 100644 --- a/content/os/v1.x/en/_index.md +++ b/content/os/v1.x/en/_index.md @@ -25,7 +25,7 @@ VMWare | 1GB | 1280MB (rancheros.iso)
2048MB (ran GCE | 1GB | 1280MB AWS | 1GB | 1.7GB -You can adjust memory requirements by custom building RancherOS, please refer to [reduce-memory-requirements]({{< baseurl >}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/#reduce-memory-requirements) +You can adjust memory requirements by custom building RancherOS, please refer to [reduce-memory-requirements]({{}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/#reduce-memory-requirements) ### How RancherOS Works diff --git a/content/os/v1.x/en/about/_index.md b/content/os/v1.x/en/about/_index.md index 05b095c5451..306c4e880a7 100644 --- a/content/os/v1.x/en/about/_index.md +++ b/content/os/v1.x/en/about/_index.md @@ -59,7 +59,7 @@ All of repositories are located within our main GitHub [page](https://github.com [RancherOS Repo](https://github.com/rancher/os): This repo contains the bulk of the RancherOS code. -[RancherOS Services Repo](https://github.com/rancher/os-services): This repo is where any [system-services]({{< baseurl >}}/os/v1.x/en//installation/system-services/adding-system-services/) can be contributed. +[RancherOS Services Repo](https://github.com/rancher/os-services): This repo is where any [system-services]({{}}/os/v1.x/en//installation/system-services/adding-system-services/) can be contributed. [RancherOS Images Repo](https://github.com/rancher/os-images): This repo is for the corresponding service images. 
diff --git a/content/os/v1.x/en/about/running-rancher-on-rancherOS/_index.md b/content/os/v1.x/en/about/running-rancher-on-rancherOS/_index.md index f0fb87544cd..d447465344b 100644 --- a/content/os/v1.x/en/about/running-rancher-on-rancherOS/_index.md +++ b/content/os/v1.x/en/about/running-rancher-on-rancherOS/_index.md @@ -7,7 +7,7 @@ RancherOS can be used to launch [Rancher](/rancher/) and be used as the OS to ad ### Launching Agents using Cloud-Config -You can easily add hosts into Rancher by using [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) to launch the rancher/agent container. +You can easily add hosts into Rancher by using [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config) to launch the rancher/agent container. After Rancher is launched and host registration has been saved, you will be able to find use the custom option to add Rancher OS nodes. @@ -37,7 +37,7 @@ rancher: ```
-> **Note:** You can not name the service `rancher-agent` as this will not allow the rancher/agent container to be launched correctly. Please read more about why [you can't name your container as `rancher-agent`]({{< baseurl >}}/rancher/v1.6/en/faqs/agents/#adding-in-name-rancher-agent). +> **Note:** You can not name the service `rancher-agent` as this will not allow the rancher/agent container to be launched correctly. Please read more about why [you can't name your container as `rancher-agent`]({{}}/rancher/v1.6/en/faqs/agents/#adding-in-name-rancher-agent). ### Adding in Host Labels diff --git a/content/os/v1.x/en/installation/amazon-ecs/_index.md b/content/os/v1.x/en/installation/amazon-ecs/_index.md index a76c7675044..7fcbefeef4c 100644 --- a/content/os/v1.x/en/installation/amazon-ecs/_index.md +++ b/content/os/v1.x/en/installation/amazon-ecs/_index.md @@ -11,13 +11,13 @@ Prior to launching RancherOS EC2 instances, the [ECS Container Instance IAM Role ### Launching an instance with ECS -RancherOS makes it easy to join your ECS cluster. The ECS agent is a [system service]({{< baseurl >}}/os/v1.x/en/installation/system-services/adding-system-services/) that is enabled in the ECS enabled AMI. There may be other RancherOS AMIs that don't have the ECS agent enabled by default, but it can easily be added in the user data on any RancherOS AMI. +RancherOS makes it easy to join your ECS cluster. The ECS agent is a [system service]({{}}/os/v1.x/en/installation/system-services/adding-system-services/) that is enabled in the ECS enabled AMI. There may be other RancherOS AMIs that don't have the ECS agent enabled by default, but it can easily be added in the user data on any RancherOS AMI. When launching the RancherOS AMI, you'll need to specify the **IAM Role** and **Advanced Details** -> **User Data** in the **Configure Instance Details** step. For the **IAM Role**, you'll need to be sure to select the ECS Container Instance IAM role. 
-For the **User Data**, you'll need to pass in the [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) file. +For the **User Data**, you'll need to pass in the [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config) file. ```yaml #cloud-config @@ -37,7 +37,7 @@ rancher: By default, the ECS agent will be using the `latest` tag for the `amazon-ecs-agent` image. In v0.5.0, we introduced the ability to select which version of the `amazon-ecs-agent`. -To select the version, you can update your [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) file. +To select the version, you can update your [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config) file. ```yaml #cloud-config diff --git a/content/os/v1.x/en/installation/boot-process/built-in-system-services/_index.md b/content/os/v1.x/en/installation/boot-process/built-in-system-services/_index.md index 32e0f7ce61f..b1cfb9b8eda 100644 --- a/content/os/v1.x/en/installation/boot-process/built-in-system-services/_index.md +++ b/content/os/v1.x/en/installation/boot-process/built-in-system-services/_index.md @@ -3,17 +3,17 @@ title: Built-in System Services weight: 150 --- -To launch RancherOS, we have built-in system services. They are defined in the [Docker Compose](https://docs.docker.com/compose/compose-file/) format, and can be found in the default system config file, `/usr/share/ros/os-config.yml`. You can [add your own system services]({{< baseurl >}}/os/v1.x/en/installation/system-services/adding-system-services/) or override services in the cloud-config. +To launch RancherOS, we have built-in system services. They are defined in the [Docker Compose](https://docs.docker.com/compose/compose-file/) format, and can be found in the default system config file, `/usr/share/ros/os-config.yml`. 
You can [add your own system services]({{}}/os/v1.x/en/installation/system-services/adding-system-services/) or override services in the cloud-config. ### preload-user-images -Read more about [image preloading]({{< baseurl >}}/os/v1.x/en/installation/boot-process/image-preloading/). +Read more about [image preloading]({{}}/os/v1.x/en/installation/boot-process/image-preloading/). ### network During this service, networking is set up, e.g. hostname, interfaces, and DNS. -It is configured by `hostname` and `rancher.network`settings in [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config). +It is configured by `hostname` and `rancher.network`settings in [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config). ### ntp @@ -24,13 +24,13 @@ Runs `ntpd` in a System Docker container. This service provides the RancherOS user interface by running `sshd` and `getty`. It completes the RancherOS configuration on start up: 1. If the `rancher.password=` kernel parameter exists, it sets `` as the password for the `rancher` user. -2. If there are no host SSH keys, it generates host SSH keys and saves them under `rancher.ssh.keys` in [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config). +2. If there are no host SSH keys, it generates host SSH keys and saves them under `rancher.ssh.keys` in [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config). 3. Runs `cloud-init -execute`, which does the following: - * Updates `.ssh/authorized_keys` in `/home/rancher` and `/home/docker` from [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/ssh-keys/) and metadata. - * Writes files specified by the `write_files` [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/write-files/) setting. - * Resizes the device specified by the `rancher.resize_device` [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/resizing-device-partition/) setting. 
- * Mount devices specified in the `mounts` [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/storage/additional-mounts/) setting. - * Set sysctl parameters specified in the`rancher.sysctl` [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/sysctl/) setting. + * Updates `.ssh/authorized_keys` in `/home/rancher` and `/home/docker` from [cloud-config]({{}}/os/v1.x/en/installation/configuration/ssh-keys/) and metadata. + * Writes files specified by the `write_files` [cloud-config]({{}}/os/v1.x/en/installation/configuration/write-files/) setting. + * Resizes the device specified by the `rancher.resize_device` [cloud-config]({{}}/os/v1.x/en/installation/configuration/resizing-device-partition/) setting. + * Mount devices specified in the `mounts` [cloud-config]({{}}/os/v1.x/en/installation/storage/additional-mounts/) setting. + * Set sysctl parameters specified in the`rancher.sysctl` [cloud-config]({{}}/os/v1.x/en/installation/configuration/sysctl/) setting. 4. If user-data contained a file that started with `#!`, then a file would be saved at `/var/lib/rancher/conf/cloud-config-script` during cloud-init and then executed. Any errors are ignored. 5. Runs `/opt/rancher/bin/start.sh` if it exists and is executable. Any errors are ignored. 6. Runs `/etc/rc.local` if it exists and is executable. Any errors are ignored. diff --git a/content/os/v1.x/en/installation/boot-process/cloud-init/_index.md b/content/os/v1.x/en/installation/boot-process/cloud-init/_index.md index 85ab3695cea..dfe48a01fb6 100644 --- a/content/os/v1.x/en/installation/boot-process/cloud-init/_index.md +++ b/content/os/v1.x/en/installation/boot-process/cloud-init/_index.md @@ -7,7 +7,7 @@ Userdata and metadata can be fetched from a cloud provider, VM runtime, or manag ### Userdata -Userdata is a file given by users when launching RancherOS hosts. It is stored in different locations depending on its format. 
If the userdata is a [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) file, indicated by beginning with `#cloud-config` and being in YAML format, it is stored in `/var/lib/rancher/conf/cloud-config.d/boot.yml`. If the userdata is a script, indicated by beginning with `#!`, it is stored in `/var/lib/rancher/conf/cloud-config-script`. +Userdata is a file given by users when launching RancherOS hosts. It is stored in different locations depending on its format. If the userdata is a [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config) file, indicated by beginning with `#cloud-config` and being in YAML format, it is stored in `/var/lib/rancher/conf/cloud-config.d/boot.yml`. If the userdata is a script, indicated by beginning with `#!`, it is stored in `/var/lib/rancher/conf/cloud-config-script`. ### Metadata @@ -15,7 +15,7 @@ Although the specifics vary based on provider, a metadata file will typically co ## Configuration Load Order -[Cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config/) is read by system services when they need to get configuration. Each additional file overwrites and extends the previous configuration file. +[Cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config/) is read by system services when they need to get configuration. Each additional file overwrites and extends the previous configuration file. 1. `/usr/share/ros/os-config.yml` - This is the system default configuration, which should **not** be modified by users. 2. `/usr/share/ros/oem/oem-config.yml` - This will typically exist by OEM, which should **not** be modified by users. 
diff --git a/content/os/v1.x/en/installation/configuration/_index.md b/content/os/v1.x/en/installation/configuration/_index.md index 628115f1816..8d835d0c1bc 100644 --- a/content/os/v1.x/en/installation/configuration/_index.md +++ b/content/os/v1.x/en/installation/configuration/_index.md @@ -34,7 +34,7 @@ In our example above, we have our `#cloud-config` line to indicate it's a cloud- ### Manually Changing Configuration To update RancherOS configuration after booting, the `ros config set ` command can be used. -For more complicated settings, like the [sysctl settings]({{< baseurl >}}/os/v1.x/en/installation/configuration/sysctl/), you can also create a small YAML file and then run `sudo ros config merge -i `. +For more complicated settings, like the [sysctl settings]({{}}/os/v1.x/en/installation/configuration/sysctl/), you can also create a small YAML file and then run `sudo ros config merge -i `. #### Getting Values diff --git a/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md b/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md index cafa5232098..5571bea102a 100644 --- a/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md +++ b/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md @@ -27,7 +27,7 @@ $ sudo system-docker run --rm -it -v /:/host alpine vi /host/boot/global.cfg ### During installation -If you want to set the extra kernel parameters when you are [Installing RancherOS to Disk]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/server/install-to-disk/) please use the `--append` parameter. +If you want to set the extra kernel parameters when you are [Installing RancherOS to Disk]({{}}/os/v1.x/en/installation/running-rancheros/server/install-to-disk/) please use the `--append` parameter. 
```bash $ sudo ros install -d /dev/sda --append "rancheros.autologin=tty1" diff --git a/content/os/v1.x/en/installation/configuration/airgap-configuration/_index.md b/content/os/v1.x/en/installation/configuration/airgap-configuration/_index.md index 81db7ae4132..8547f29023e 100644 --- a/content/os/v1.x/en/installation/configuration/airgap-configuration/_index.md +++ b/content/os/v1.x/en/installation/configuration/airgap-configuration/_index.md @@ -10,10 +10,10 @@ In the air gap environment, the Docker registry, RancherOS repositories URL, and You should use a private Docker registry so that `user-docker` and `system-docker` can pull images. -1. Add the private Docker registry domain to the [images prefix]({{< baseurl >}}/os/v1.x/en/installation/configuration/images-prefix/). -2. Set the private registry certificates for `user-docker`. For details, refer to [Certificates for Private Registries]({{< baseurl >}}/os/v1.x/en/installation/configuration/private-registries/#certificates-for-private-registries) +1. Add the private Docker registry domain to the [images prefix]({{}}/os/v1.x/en/installation/configuration/images-prefix/). +2. Set the private registry certificates for `user-docker`. For details, refer to [Certificates for Private Registries]({{}}/os/v1.x/en/installation/configuration/private-registries/#certificates-for-private-registries) 3. Set the private registry certificates for `system-docker`. There are two ways to set the certificates: - - To set the private registry certificates before RancherOS starts, you can run a script included with RancherOS. For details, refer to [Set Custom Certs in ISO]({{< baseurl >}}/os/v1.x/en/installation/configuration/airgap-configuration/#set-custom-certs-in-iso). + - To set the private registry certificates before RancherOS starts, you can run a script included with RancherOS. For details, refer to [Set Custom Certs in ISO]({{}}/os/v1.x/en/installation/configuration/airgap-configuration/#set-custom-certs-in-iso). 
- To set the private registry certificates after RancherOS starts, append your private registry certs to the `/etc/ssl/certs/ca-certificates.crt.rancher` file. Then reboot to make the certs fully take effect. 4. The images used by RancherOS should be pushed to your private registry. @@ -84,7 +84,7 @@ $ sudo ros config set rancher.upgrade.url https://foo.bar.com/os/releases.yml Here is a total cloud-config example for using RancherOS in an air gap environment. -For `system-docker`, see [Configuring Private Docker Registry]({{< baseurl >}}/os/v1.x/en/installation/configuration/airgap-configuration/#configuring-private-docker-registry). +For `system-docker`, see [Configuring Private Docker Registry]({{}}/os/v1.x/en/installation/configuration/airgap-configuration/#configuring-private-docker-registry). ```yaml #cloud-config diff --git a/content/os/v1.x/en/installation/configuration/date-and-timezone/_index.md b/content/os/v1.x/en/installation/configuration/date-and-timezone/_index.md index 13ec156209f..1c4cff6e930 100644 --- a/content/os/v1.x/en/installation/configuration/date-and-timezone/_index.md +++ b/content/os/v1.x/en/installation/configuration/date-and-timezone/_index.md @@ -5,7 +5,7 @@ weight: 121 The default console keeps time in the Coordinated Universal Time (UTC) zone and synchronizes clocks with the Network Time Protocol (NTP). The Network Time Protocol daemon (ntpd) is an operating system program that maintains the system time in synchronization with time servers using the NTP. -RancherOS can run ntpd in the System Docker container. You can update its configurations by updating `/etc/ntp.conf`. For an example of how to update a file such as `/etc/ntp.conf` within a container, refer to [this page.]({{< baseurl >}}/os/v1.x/en/installation/configuration/write-files/#writing-files-in-specific-system-services) +RancherOS can run ntpd in the System Docker container. You can update its configurations by updating `/etc/ntp.conf`. 
For an example of how to update a file such as `/etc/ntp.conf` within a container, refer to [this page.]({{}}/os/v1.x/en/installation/configuration/write-files/#writing-files-in-specific-system-services) The default console cannot support changing the time zone because including `tzdata` (time zone data) will increase the ISO size. However, you can change the time zone in the container by passing a flag to specify the time zone when you run the container: diff --git a/content/os/v1.x/en/installation/configuration/docker/_index.md b/content/os/v1.x/en/installation/configuration/docker/_index.md index 0620f6ecd6d..89ec70999e6 100644 --- a/content/os/v1.x/en/installation/configuration/docker/_index.md +++ b/content/os/v1.x/en/installation/configuration/docker/_index.md @@ -3,7 +3,7 @@ title: Configuring Docker or System Docker weight: 126 --- -In RancherOS, you can configure System Docker and Docker daemons by using [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config). +In RancherOS, you can configure System Docker and Docker daemons by using [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config). ### Configuring Docker @@ -61,7 +61,7 @@ Key | Value | Default | Description ---|---|---| --- `extra_args` | List of Strings | `[]` | Arbitrary daemon arguments, appended to the generated command `environment` | List of Strings | `[]` | -`tls` | Boolean | `false` | When [setting up TLS]({{< baseurl >}}/os/v1.x/en/installation/configuration/setting-up-docker-tls/), this key needs to be set to true. +`tls` | Boolean | `false` | When [setting up TLS]({{}}/os/v1.x/en/installation/configuration/setting-up-docker-tls/), this key needs to be set to true. `tls_args` | List of Strings (used only if `tls: true`) | `[]` | `server_key` | String (used only if `tls: true`)| `""` | PEM encoded server TLS key. `server_cert` | String (used only if `tls: true`) | `""` | PEM encoded server TLS certificate. 
@@ -120,7 +120,7 @@ $ ros config set rancher.system_docker.bip 172.19.0.0/16 _Available as of v1.4.x_ The default path of system-docker logs is `/var/log/system-docker.log`. If you want to write the system-docker logs to a separate partition, -e.g. [RANCHER_OEM partition]({{< baseurl >}}/os/v1.x/en/about/custom-partition-layout/#use-rancher-oem-partition), you can try `rancher.defaults.system_docker_logs`: +e.g. [RANCHER_OEM partition]({{}}/os/v1.x/en/about/custom-partition-layout/#use-rancher-oem-partition), you can try `rancher.defaults.system_docker_logs`: ``` #cloud-config diff --git a/content/os/v1.x/en/installation/configuration/hostname/_index.md b/content/os/v1.x/en/installation/configuration/hostname/_index.md index 0b05fa53e45..d315838c58e 100644 --- a/content/os/v1.x/en/installation/configuration/hostname/_index.md +++ b/content/os/v1.x/en/installation/configuration/hostname/_index.md @@ -3,7 +3,7 @@ title: Setting the Hostname weight: 124 --- -You can set the hostname of the host using [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config). The example below shows how to configure it. +You can set the hostname of the host using [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config). The example below shows how to configure it. ```yaml #cloud-config diff --git a/content/os/v1.x/en/installation/configuration/private-registries/_index.md b/content/os/v1.x/en/installation/configuration/private-registries/_index.md index 5abe0adbbaf..c371956f3fd 100644 --- a/content/os/v1.x/en/installation/configuration/private-registries/_index.md +++ b/content/os/v1.x/en/installation/configuration/private-registries/_index.md @@ -3,7 +3,7 @@ title: Private Registries weight: 128 --- -When launching services through a [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config), it is sometimes necessary to pull a private image from DockerHub or from a private registry. 
Authentication for these can be embedded in your cloud-config. +When launching services through a [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config), it is sometimes necessary to pull a private image from DockerHub or from a private registry. Authentication for these can be embedded in your cloud-config. For example, to add authentication for DockerHub: @@ -61,7 +61,7 @@ write_files: ### Certificates for Private Registries -Certificates can be stored in the standard locations (i.e. `/etc/docker/certs.d`) following the [Docker documentation](https://docs.docker.com/registry/insecure). By using the `write_files` directive of the [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config), the certificates can be written directly into `/etc/docker/certs.d`. +Certificates can be stored in the standard locations (i.e. `/etc/docker/certs.d`) following the [Docker documentation](https://docs.docker.com/registry/insecure). By using the `write_files` directive of the [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config), the certificates can be written directly into `/etc/docker/certs.d`. ```yaml #cloud-config diff --git a/content/os/v1.x/en/installation/configuration/running-commands/_index.md b/content/os/v1.x/en/installation/configuration/running-commands/_index.md index 11b8d44d8be..b18fc06b875 100644 --- a/content/os/v1.x/en/installation/configuration/running-commands/_index.md +++ b/content/os/v1.x/en/installation/configuration/running-commands/_index.md @@ -31,4 +31,4 @@ write_files: docker run -d nginx ``` -Running Docker commands in this manner is useful when pieces of the `docker run` command are dynamically generated. For services whose configuration is static, [adding a system service]({{< baseurl >}}/os/v1.x/en/installation/system-services/adding-system-services/) is recommended. +Running Docker commands in this manner is useful when pieces of the `docker run` command are dynamically generated. 
For services whose configuration is static, [adding a system service]({{}}/os/v1.x/en/installation/system-services/adding-system-services/) is recommended. diff --git a/content/os/v1.x/en/installation/configuration/ssh-keys/_index.md b/content/os/v1.x/en/installation/configuration/ssh-keys/_index.md index 2204c5b637a..1752042dcf1 100644 --- a/content/os/v1.x/en/installation/configuration/ssh-keys/_index.md +++ b/content/os/v1.x/en/installation/configuration/ssh-keys/_index.md @@ -3,7 +3,7 @@ title: SSH Settings weight: 121 --- -RancherOS supports adding SSH keys through the [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) file. Within the cloud-config file, you simply add the ssh keys within the `ssh_authorized_keys` key. +RancherOS supports adding SSH keys through the [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config) file. Within the cloud-config file, you simply add the ssh keys within the `ssh_authorized_keys` key. ```yaml #cloud-config diff --git a/content/os/v1.x/en/installation/configuration/switching-consoles/_index.md b/content/os/v1.x/en/installation/configuration/switching-consoles/_index.md index e351cac5b65..d9dd64176af 100644 --- a/content/os/v1.x/en/installation/configuration/switching-consoles/_index.md +++ b/content/os/v1.x/en/installation/configuration/switching-consoles/_index.md @@ -3,13 +3,13 @@ title: Switching Consoles weight: 125 --- -When [booting from the ISO]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/), RancherOS starts with the default console, which is based on busybox. +When [booting from the ISO]({{}}/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/), RancherOS starts with the default console, which is based on busybox. -You can select which console you want RancherOS to start with using the [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config). 
+You can select which console you want RancherOS to start with using the [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config). ### Enabling Consoles using Cloud-Config -When launching RancherOS with a [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) file, you can select which console you want to use. +When launching RancherOS with a [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config) file, you can select which console you want to use. Currently, the list of available consoles are: @@ -102,7 +102,7 @@ All consoles except the default (busybox) console are persistent. Persistent con
-> **Note:** When using a persistent console and in the current version's console, [rolling back]({{< baseurl >}}/os/v1.x/en/upgrading/#rolling-back-an-upgrade) is not supported. For example, rolling back to v0.4.5 when using a v0.5.0 persistent console is not supported. +> **Note:** When using a persistent console and in the current version's console, [rolling back]({{}}/os/v1.x/en/upgrading/#rolling-back-an-upgrade) is not supported. For example, rolling back to v0.4.5 when using a v0.5.0 persistent console is not supported. ### Enabling Consoles diff --git a/content/os/v1.x/en/installation/configuration/switching-docker-versions/_index.md b/content/os/v1.x/en/installation/configuration/switching-docker-versions/_index.md index e51d1d46405..c64e20b7733 100644 --- a/content/os/v1.x/en/installation/configuration/switching-docker-versions/_index.md +++ b/content/os/v1.x/en/installation/configuration/switching-docker-versions/_index.md @@ -3,7 +3,7 @@ title: Switching Docker Versions weight: 129 --- -The version of User Docker used in RancherOS can be configured using a [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) file or by using the `ros engine` command. +The version of User Docker used in RancherOS can be configured using a [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config) file or by using the `ros engine` command. > **Note:** There are known issues in Docker when switching between versions. For production systems, we recommend setting the Docker engine only once [using a cloud-config](#setting-the-docker-engine-using-cloud-config). @@ -83,7 +83,7 @@ FROM scratch COPY engine /engine ``` -Once the image is built a [system service]({{< baseurl >}}/os/v1.x/en/installation/system-services/adding-system-services/) configuration file must be created. An [example file](https://github.com/rancher/os-services/blob/master/d/docker-18.06.3-ce.yml) can be found in the rancher/os-services repo. 
Change the `image` field to point to the Docker engine image you've built. +Once the image is built a [system service]({{}}/os/v1.x/en/installation/system-services/adding-system-services/) configuration file must be created. An [example file](https://github.com/rancher/os-services/blob/master/d/docker-18.06.3-ce.yml) can be found in the rancher/os-services repo. Change the `image` field to point to the Docker engine image you've built. All of the previously mentioned methods of switching Docker engines are now available. For example, if your service file is located at `https://myservicefile` then the following cloud-config file could be used to use your custom Docker engine. diff --git a/content/os/v1.x/en/installation/configuration/users/_index.md b/content/os/v1.x/en/installation/configuration/users/_index.md index 529281eef07..0a89ec8d5ea 100644 --- a/content/os/v1.x/en/installation/configuration/users/_index.md +++ b/content/os/v1.x/en/installation/configuration/users/_index.md @@ -5,7 +5,7 @@ weight: 130 Currently, we don't support adding other users besides `rancher`. -You _can_ add users in the console container, but these users will only exist as long as the console container exists. It only makes sense to add users in a [persistent consoles]({{< baseurl >}}/os/v1.x/en/installation/custom-builds/custom-console/#console-persistence). +You _can_ add users in the console container, but these users will only exist as long as the console container exists. It only makes sense to add users in a [persistent console]({{}}/os/v1.x/en/installation/custom-builds/custom-console/#console-persistence). If you want the console user to be able to ssh into RancherOS, you need to add them to the `docker` group. 
diff --git a/content/os/v1.x/en/installation/custom-builds/custom-console/_index.md b/content/os/v1.x/en/installation/custom-builds/custom-console/_index.md index c24ca816aeb..f3c1b330765 100644 --- a/content/os/v1.x/en/installation/custom-builds/custom-console/_index.md +++ b/content/os/v1.x/en/installation/custom-builds/custom-console/_index.md @@ -3,13 +3,13 @@ title: Custom Console weight: 180 --- -When [booting from the ISO]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/), RancherOS starts with the default console, which is based on busybox. +When [booting from the ISO]({{}}/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/), RancherOS starts with the default console, which is based on busybox. -You can select which console you want RancherOS to start with using the [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config). +You can select which console you want RancherOS to start with using the [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config). ### Enabling Consoles using Cloud-Config -When launching RancherOS with a [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) file, you can select which console you want to use. +When launching RancherOS with a [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config) file, you can select which console you want to use. Currently, the list of available consoles are: @@ -102,7 +102,7 @@ All consoles except the default (busybox) console are persistent. Persistent con
-> **Note:** When using a persistent console and in the current version's console, [rolling back]({{< baseurl >}}/os/v1.x/en/upgrading/#rolling-back-an-upgrade) is not supported. For example, rolling back to v0.4.5 when using a v0.5.0 persistent console is not supported. +> **Note:** When using a persistent console and in the current version's console, [rolling back]({{}}/os/v1.x/en/upgrading/#rolling-back-an-upgrade) is not supported. For example, rolling back to v0.4.5 when using a v0.5.0 persistent console is not supported. ### Enabling Consoles diff --git a/content/os/v1.x/en/installation/custom-builds/custom-kernels/_index.md b/content/os/v1.x/en/installation/custom-builds/custom-kernels/_index.md index 8a7ff668a11..b3d6d35baae 100644 --- a/content/os/v1.x/en/installation/custom-builds/custom-kernels/_index.md +++ b/content/os/v1.x/en/installation/custom-builds/custom-kernels/_index.md @@ -59,7 +59,7 @@ Your kernel should be packaged and published as a set of files of the following ### Building a RancherOS release using the Packaged kernel files. -By default, RancherOS ships with the kernel provided by the [os-kernel repository](https://github.com/rancher/os-kernel). Swapping out the default kernel can by done by [building your own custom RancherOS ISO]({{< baseurl >}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/). +By default, RancherOS ships with the kernel provided by the [os-kernel repository](https://github.com/rancher/os-kernel). Swapping out the default kernel can be done by [building your own custom RancherOS ISO]({{}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/). Create a clone of the main [RancherOS repository](https://github.com/rancher/os) to your local machine with a `git clone`. 
@@ -75,6 +75,6 @@ ARG KERNEL_VERSION_amd64=4.14.63-rancher ARG KERNEL_URL_amd64=https://link/xxxx ``` -After you've replaced the URL with your custom kernel, you can follow the steps in [building your own custom RancherOS ISO]({{< baseurl >}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/). +After you've replaced the URL with your custom kernel, you can follow the steps in [building your own custom RancherOS ISO]({{}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/). > **Note:** `KERNEL_URL` settings should point to a Linux kernel, compiled and packaged in a specific way. You can fork [os-kernel repository](https://github.com/rancher/os-kernel) to package your own kernel. diff --git a/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md b/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md index 697189f8d9d..faec99fb845 100644 --- a/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md +++ b/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md @@ -11,7 +11,7 @@ Create a clone of the main [RancherOS repository](https://github.com/rancher/os) $ git clone https://github.com/rancher/os.git ``` -In the root of the repository, the "General Configuration" section of `Dockerfile.dapper` can be updated to use [custom kernels]({{< baseurl >}}/os/v1.x/en/installation/custom-builds/custom-kernels). +In the root of the repository, the "General Configuration" section of `Dockerfile.dapper` can be updated to use [custom kernels]({{}}/os/v1.x/en/installation/custom-builds/custom-kernels). After you've saved your edits, run `make` in the root directory. After the build has completed, a `./dist/artifacts` directory will be created with the custom built RancherOS release files. 
Build Requirements: `bash`, `make`, `docker` (Docker version >= 1.10.3) @@ -29,7 +29,7 @@ If you need a compressed ISO, you can run this command: $ make release ``` -The `rancheros.iso` is ready to be used to [boot RancherOS from ISO]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/) or [launch RancherOS using Docker Machine]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/workstation/docker-machine). +The `rancheros.iso` is ready to be used to [boot RancherOS from ISO]({{}}/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/) or [launch RancherOS using Docker Machine]({{}}/os/v1.x/en/installation/running-rancheros/workstation/docker-machine). ## Creating a GCE Image Archive @@ -50,7 +50,7 @@ RANCHEROS_VERSION=v1.4.0 make build-gce #### Reduce Memory Requirements -With changes to the kernel and built Docker, RancherOS booting requires more memory. For details, please refer to the [memory requirements]({{< baseurl >}}/os/v1.x/en/#hardware-requirements). +With changes to the kernel and built Docker, RancherOS booting requires more memory. For details, please refer to the [memory requirements]({{}}/os/v1.x/en/#hardware-requirements). By customizing the ISO, you can reduce the memory usage on boot. The easiest way is to downgrade the built-in Docker version, because Docker takes up a lot of space. This can effectively reduce the memory required to decompress the `initrd` on boot. Using docker 17.03 is a good choice: diff --git a/content/os/v1.x/en/installation/running-rancheros/_index.md b/content/os/v1.x/en/installation/running-rancheros/_index.md index c677f71c35e..17f070f3636 100644 --- a/content/os/v1.x/en/installation/running-rancheros/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/_index.md @@ -3,37 +3,37 @@ title: Running RancherOS weight: 100 --- -RancherOS runs on virtualization platforms, cloud providers and bare metal servers. We also support running a local VM on your laptop. 
To start running RancherOS as quickly as possible, follow our [Quick Start Guide]({{< baseurl >}}/os/v1.x/en/quick-start-guide/). +RancherOS runs on virtualization platforms, cloud providers and bare metal servers. We also support running a local VM on your laptop. To start running RancherOS as quickly as possible, follow our [Quick Start Guide]({{}}/os/v1.x/en/quick-start-guide/). ### Platforms #### Workstation -[Docker Machine]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/workstation/docker-machine) +[Docker Machine]({{}}/os/v1.x/en/installation/running-rancheros/workstation/docker-machine) -[Boot from ISO]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso) +[Boot from ISO]({{}}/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso) #### Cloud -[Amazon EC2]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/aws) +[Amazon EC2]({{}}/os/v1.x/en/installation/running-rancheros/cloud/aws) -[Google Compute Engine]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/gce) +[Google Compute Engine]({{}}/os/v1.x/en/installation/running-rancheros/cloud/gce) -[DigitalOcean]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/do) +[DigitalOcean]({{}}/os/v1.x/en/installation/running-rancheros/cloud/do) -[Azure]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/azure) +[Azure]({{}}/os/v1.x/en/installation/running-rancheros/cloud/azure) -[OpenStack]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/openstack) +[OpenStack]({{}}/os/v1.x/en/installation/running-rancheros/cloud/openstack) -[VMware ESXi]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi) +[VMware ESXi]({{}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi) -[Aliyun]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/aliyun) +[Aliyun]({{}}/os/v1.x/en/installation/running-rancheros/cloud/aliyun) #### Bare Metal & Virtual Servers -[PXE]({{< baseurl 
>}}/os/v1.x/en/installation/running-rancheros/server/pxe) +[PXE]({{}}/os/v1.x/en/installation/running-rancheros/server/pxe) -[Install to Hard Disk]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/server/install-to-disk) +[Install to Hard Disk]({{}}/os/v1.x/en/installation/running-rancheros/server/install-to-disk) -[Raspberry Pi]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/server/raspberry-pi) +[Raspberry Pi]({{}}/os/v1.x/en/installation/running-rancheros/server/raspberry-pi) diff --git a/content/os/v1.x/en/installation/running-rancheros/cloud/aliyun/_index.md b/content/os/v1.x/en/installation/running-rancheros/cloud/aliyun/_index.md index ce08ce913fb..839d0200e90 100644 --- a/content/os/v1.x/en/installation/running-rancheros/cloud/aliyun/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/cloud/aliyun/_index.md @@ -13,7 +13,7 @@ RancherOS is available as an image in Aliyun, and can be easily run in Elastic C Example: -![RancherOS on Aliyun 1]({{< baseurl >}}/img/os/RancherOS_aliyun1.jpg) +![RancherOS on Aliyun 1]({{}}/img/os/RancherOS_aliyun1.jpg) ## Options @@ -29,6 +29,6 @@ After the image is uploaded, we can use the `Aliyun Console` to start a new inst Since the image is private, we need to use the `Custom Images`. -![RancherOS on Aliyun 2]({{< baseurl >}}/img/os/RancherOS_aliyun2.jpg) +![RancherOS on Aliyun 2]({{}}/img/os/RancherOS_aliyun2.jpg) After the instance is successfully started, we can login with the `rancher` user via SSH. 
diff --git a/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md b/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md index e8886b5f617..66bb3cb663a 100644 --- a/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md @@ -28,7 +28,7 @@ Let’s walk through how to import and create a RancherOS on EC2 machine using t {{< img "/img/os/Rancher_aws1.png" "RancherOS on AWS 1">}} 2. Select the **Community AMIs** on the sidebar and search for **RancherOS**. Pick the latest version and click **Select**. {{< img "/img/os/Rancher_aws2.png" "RancherOS on AWS 2">}} -3. Go through the steps of creating the instance type through the AWS console. If you want to pass in a [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) file during boot of RancherOS, you'd pass in the file as **User data** by expanding the **Advanced Details** in **Step 3: Configure Instance Details**. You can pass in the data as text or as a file. +3. Go through the steps of creating the instance type through the AWS console. If you want to pass in a [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config) file during boot of RancherOS, you'd pass in the file as **User data** by expanding the **Advanced Details** in **Step 3: Configure Instance Details**. You can pass in the data as text or as a file. {{< img "/img/os/Rancher_aws6.png" "RancherOS on AWS 6">}} After going through all the steps, you finally click on **Launch**, and either create a new key pair or choose an existing key pair to be used with the EC2 instance. If you have created a new key pair, download the key pair. If you have chosen an existing key pair, make sure you have the key pair accessible. Click on **Launch Instances**. 
{{< img "/img/os/Rancher_aws3.png" "RancherOS on AWS 3">}} diff --git a/content/os/v1.x/en/installation/running-rancheros/cloud/do/_index.md b/content/os/v1.x/en/installation/running-rancheros/cloud/do/_index.md index d644822ded6..683fbe0ef9c 100644 --- a/content/os/v1.x/en/installation/running-rancheros/cloud/do/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/cloud/do/_index.md @@ -15,7 +15,7 @@ To start a RancherOS Droplet on Digital Ocean: 1. Click **Create Droplet.** 1. Click the **Container distributions** tab. 1. Click **RancherOS.** -1. Choose a plan. Make sure your Droplet has the [minimum hardware requirements for RancherOS]({{< baseurl >}}os/v1.x/en/overview/#hardware-requirements). +1. Choose a plan. Make sure your Droplet has the [minimum hardware requirements for RancherOS]({{}}os/v1.x/en/overview/#hardware-requirements). 1. Choose any options for backups, block storage, and datacenter region. 1. Optional: In the **Select additional options** section, you can check the **User data** box and enter a `cloud-config` file in the text box that appears. The `cloud-config` file is used to provide a script to be run on the first boot. An example is below. 1. Choose an SSH key that you have access to, or generate a new SSH key. 
diff --git a/content/os/v1.x/en/installation/running-rancheros/cloud/gce/_index.md b/content/os/v1.x/en/installation/running-rancheros/cloud/gce/_index.md index 6545a2a3477..0c08d8fc1ed 100644 --- a/content/os/v1.x/en/installation/running-rancheros/cloud/gce/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/cloud/gce/_index.md @@ -3,7 +3,7 @@ title: Google Compute Engine (GCE) weight: 106 --- -> **Note:** Due to the maximum transmission unit (MTU) of [1460 bytes on GCE](https://cloud.google.com/compute/docs/troubleshooting#packetfragmentation), you will need to configure your [network interfaces]({{< baseurl >}}/os/v1.x/en/installation/networking/interfaces/) and both the [Docker and System Docker]({{< baseurl >}}/os/v1.x/en/installation/configuration/docker/) to use a MTU of 1460 bytes or you will encounter weird networking related errors. +> **Note:** Due to the maximum transmission unit (MTU) of [1460 bytes on GCE](https://cloud.google.com/compute/docs/troubleshooting#packetfragmentation), you will need to configure your [network interfaces]({{}}/os/v1.x/en/installation/networking/interfaces/) and both the [Docker and System Docker]({{}}/os/v1.x/en/installation/configuration/docker/) to use a MTU of 1460 bytes or you will encounter weird networking related errors. ### Adding the RancherOS Image into GCE @@ -26,7 +26,7 @@ $ gcloud compute instances create --project --zone }}/os/v1.x/en/installation/configuration/#cloud-config), you can pass it as metadata upon creation of the instance during the `gcloud compute` command. The file will need to be stored locally before running the command. The key of the metadata will be `user-data` and the value is the location of the file. If any SSH keys are added in the cloud config file, it will also be added to the **rancher** user. 
+If you want to pass in your own cloud config file that will be processed by [cloud init]({{}}/os/v1.x/en/installation/configuration/#cloud-config), you can pass it as metadata upon creation of the instance during the `gcloud compute` command. The file will need to be stored locally before running the command. The key of the metadata will be `user-data` and the value is the location of the file. If any SSH keys are added in the cloud config file, it will also be added to the **rancher** user. ``` $ gcloud compute instances create --project --zone --image --metadata-from-file user-data=/Directory/of/Cloud_Config.yml @@ -74,11 +74,11 @@ Updated [https://www.googleapis.com/compute/v1/projects/PROJECT_ID/zones/ZONE_OF After the image is uploaded, it's easy to use the console to create new instances. You will **not** be able to upload your own cloud config file when creating instances through the console. You can add it after the instance is created using `gcloud compute` commands and resetting the instance. 1. Make sure you are in the project that the image was created in. - ![RancherOS on GCE 4]({{< baseurl >}}/img/os/Rancher_gce4.png) + ![RancherOS on GCE 4]({{}}/img/os/Rancher_gce4.png) 2. In the navigation bar, click on the **VM instances**, which is located at Compute -> Compute Engine -> Metadata. Click on **Create instance**. - ![RancherOS on GCE 5]({{< baseurl >}}/img/os/Rancher_gce5.png) + ![RancherOS on GCE 5]({{}}/img/os/Rancher_gce5.png) 2. Fill out the information for your instance. In the **Image** dropdown, your private image will be listed among the public images provided by Google. Select the private image for RancherOS. Click **Create**. - ![RancherOS on GCE 6]({{< baseurl >}}/img/os/Rancher_gce6.png) + ![RancherOS on GCE 6]({{}}/img/os/Rancher_gce6.png) 3. Your instance is being created and will be up and running shortly! 
#### Adding SSH keys @@ -89,7 +89,7 @@ In order to SSH into the GCE instance, you will need to have SSH keys set up in In your project, click on **Metadata**, which is located within Compute -> Compute Engine -> Metadata. Click on **SSH Keys**. -![RancherOS on GCE 7]({{< baseurl >}}/img/os/Rancher_gce7.png) +![RancherOS on GCE 7]({{}}/img/os/Rancher_gce7.png) Add the SSH keys that you want to have access to any instances within your project. @@ -99,11 +99,11 @@ Note: If you do this after any RancherOS instance is created, you will need to r After your instance is created, click on the instance name. Scroll down to the **SSH Keys** section and click on **Add SSH key**. This key will only be applicable to the instance. -![RancherOS on GCE 8]({{< baseurl >}}/img/os/Rancher_gce8.png) +![RancherOS on GCE 8]({{}}/img/os/Rancher_gce8.png) After the SSH keys have been added, you'll need to reset the machine, by clicking **Reset**. -![RancherOS on GCE 9]({{< baseurl >}}/img/os/Rancher_gce9.png) +![RancherOS on GCE 9]({{}}/img/os/Rancher_gce9.png) After a little bit, you will be able to SSH into the box using the **rancher** user. diff --git a/content/os/v1.x/en/installation/running-rancheros/cloud/openstack/_index.md b/content/os/v1.x/en/installation/running-rancheros/cloud/openstack/_index.md index 7649d6e7e1a..0a55044ed47 100644 --- a/content/os/v1.x/en/installation/running-rancheros/cloud/openstack/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/cloud/openstack/_index.md @@ -5,4 +5,4 @@ weight: 109 As of v0.5.0, RancherOS releases include an Openstack image that can be found on our [releases page](https://github.com/rancher/os/releases). The image format is [QCOW3](https://wiki.qemu.org/Features/Qcow3#Fully_QCOW2_backwards-compatible_feature_set) that is backward compatible with QCOW2. 
-When launching an instance using the image, you must enable **Advanced Options** -> **Configuration Drive** and in order to use a [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) file. +When launching an instance using the image, you must enable **Advanced Options** -> **Configuration Drive** and in order to use a [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config) file. diff --git a/content/os/v1.x/en/installation/running-rancheros/server/install-to-disk/_index.md b/content/os/v1.x/en/installation/running-rancheros/server/install-to-disk/_index.md index e0deb1b54a4..0b3363704b8 100644 --- a/content/os/v1.x/en/installation/running-rancheros/server/install-to-disk/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/server/install-to-disk/_index.md @@ -3,7 +3,7 @@ title: Installing to Disk weight: 111 --- -RancherOS comes with a simple installer that will install RancherOS on a given target disk. To install RancherOS on a new disk, you can use the `ros install` command. Before installing, you'll need to have already [booted RancherOS from ISO]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso). Please be sure to pick the `rancheros.iso` from our release [page](https://github.com/rancher/os/releases). +RancherOS comes with a simple installer that will install RancherOS on a given target disk. To install RancherOS on a new disk, you can use the `ros install` command. Before installing, you'll need to have already [booted RancherOS from ISO]({{}}/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso). Please be sure to pick the `rancheros.iso` from our release [page](https://github.com/rancher/os/releases). 
### Using `ros install` to Install RancherOS @@ -11,7 +11,7 @@ The `ros install` command orchestrates the installation from the `rancher/os` co #### Cloud-Config -The easiest way to log in is to pass a `cloud-config.yml` file containing your public SSH keys. To learn more about what's supported in our cloud-config, please read our [documentation]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config). +The easiest way to log in is to pass a `cloud-config.yml` file containing your public SSH keys. To learn more about what's supported in our cloud-config, please read our [documentation]({{}}/os/v1.x/en/installation/configuration/#cloud-config). The `ros install` command will process your `cloud-config.yml` file specified with the `-c` flag. This file will also be placed onto the disk and installed to `/var/lib/rancher/conf/`. It will be evaluated on every boot. @@ -61,7 +61,7 @@ Status: Downloaded newer image for rancher/os:v0.5.0 Continue with reboot [y/N]: ``` -After installing RancherOS to disk, you will no longer be automatically logged in as the `rancher` user. You'll need to have added in SSH keys within your [cloud-config file]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config). +After installing RancherOS to disk, you will no longer be automatically logged in as the `rancher` user. You'll need to have added in SSH keys within your [cloud-config file]({{}}/os/v1.x/en/installation/configuration/#cloud-config). #### Installing a Different Version diff --git a/content/os/v1.x/en/installation/running-rancheros/server/pxe/_index.md b/content/os/v1.x/en/installation/running-rancheros/server/pxe/_index.md index 4041c3cf2cf..1b855649615 100644 --- a/content/os/v1.x/en/installation/running-rancheros/server/pxe/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/server/pxe/_index.md @@ -63,11 +63,11 @@ Valid cloud-init datasources for RancherOS. 
| cmdline | Kernel command line: `cloud-config-url=http://link/user_data` | | configdrive | /media/config-2 | | url | URL address | -| vmware| Set `guestinfo` cloud-init or interface data as per [VMware ESXi]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi) | +| vmware| Set `guestinfo` cloud-init or interface data as per [VMware ESXi]({{}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi) | | * | This will add ["configdrive", "vmware", "ec2", "digitalocean", "packet", "gce"] into the list of datasources to try | The vmware datasource was added as of v1.1. ### Cloud-Config -When booting via iPXE, RancherOS can be configured using a [cloud-config file]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config). +When booting via iPXE, RancherOS can be configured using a [cloud-config file]({{}}/os/v1.x/en/installation/configuration/#cloud-config). diff --git a/content/os/v1.x/en/installation/running-rancheros/server/raspberry-pi/_index.md b/content/os/v1.x/en/installation/running-rancheros/server/raspberry-pi/_index.md index 7ac84cf84bc..a858fb38a2d 100644 --- a/content/os/v1.x/en/installation/running-rancheros/server/raspberry-pi/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/server/raspberry-pi/_index.md @@ -5,7 +5,7 @@ weight: 113 As of v0.5.0, RancherOS releases include a Raspberry Pi image that can be found on our [releases page](https://github.com/rancher/os/releases). The official Raspberry Pi documentation contains instructions on how to [install operating system images](https://www.raspberrypi.org/documentation/installation/installing-images/). -When installing, there is no ability to pass in a [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config). You will need to boot up, change the configuration and then reboot to apply those changes. +When installing, there is no ability to pass in a [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config). 
You will need to boot up, change the configuration and then reboot to apply those changes. Currently, only Raspberry Pi 3 is tested and known to work. diff --git a/content/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/_index.md b/content/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/_index.md index 6a1b52a6f03..372fa258727 100644 --- a/content/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/_index.md @@ -13,8 +13,8 @@ VMware | [rancheros-vmware.iso](https://releases.rancher.com/os/latest/vmwar Hyper-V | [rancheros-hyperv.iso](https://releases.rancher.com/os/latest/hyperv/rancheros.iso) Proxmox VE | [rancheros-proxmoxve.iso](https://releases.rancher.com/os/latest/proxmoxve/rancheros.iso) -You must boot with enough memory which you can refer to [here]({{< baseurl >}}/os/v1.x/en/overview/#hardware-requirements). If you boot with the ISO, you will automatically be logged in as the `rancher` user. Only the ISO is set to use autologin by default. If you run from a cloud or install to disk, SSH keys or a password of your choice is expected to be used. +You must boot with enough memory which you can refer to [here]({{}}/os/v1.x/en/overview/#hardware-requirements). If you boot with the ISO, you will automatically be logged in as the `rancher` user. Only the ISO is set to use autologin by default. If you run from a cloud or install to disk, SSH keys or a password of your choice is expected to be used. ### Install to Disk -After you boot RancherOS from ISO, you can follow the instructions [here]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/server/install-to-disk/) to install RancherOS to a hard disk. +After you boot RancherOS from ISO, you can follow the instructions [here]({{}}/os/v1.x/en/installation/running-rancheros/server/install-to-disk/) to install RancherOS to a hard disk. 
diff --git a/content/os/v1.x/en/installation/running-rancheros/workstation/docker-machine/_index.md b/content/os/v1.x/en/installation/running-rancheros/workstation/docker-machine/_index.md index 0a21a3f7549..0cf9207f66b 100644 --- a/content/os/v1.x/en/installation/running-rancheros/workstation/docker-machine/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/workstation/docker-machine/_index.md @@ -4,7 +4,7 @@ weight: 101 --- Before we get started, you'll need to make sure that you have docker machine installed. Download it directly from the docker machine [releases](https://github.com/docker/machine/releases). -You also need to know the [memory requirements]({{< baseurl >}}/os/v1.x/en/#hardware-requirements). +You also need to know the [memory requirements]({{}}/os/v1.x/en/#hardware-requirements). > **Note:** If you create a RancherOS instance using Docker Machine, you will not be able to upgrade your version of RancherOS. @@ -116,7 +116,7 @@ Logging into RancherOS follows the standard Docker Machine commands. To login in $ docker-machine ssh ``` -You'll be logged into RancherOS and can start exploring the OS, This will log you into the RancherOS VM. You'll then be able to explore the OS by [adding system services]({{< baseurl >}}/os/v1.x/en/installation/system-services/adding-system-services/), [customizing the configuration]({{< baseurl >}}/os/v1.x/en/installation/configuration/), and launching containers. +You'll be logged into RancherOS and can start exploring the OS, This will log you into the RancherOS VM. You'll then be able to explore the OS by [adding system services]({{}}/os/v1.x/en/installation/system-services/adding-system-services/), [customizing the configuration]({{}}/os/v1.x/en/installation/configuration/), and launching containers. If you want to exit out of RancherOS, you can exit by pressing `Ctrl+D`. 
diff --git a/content/os/v1.x/en/installation/storage/additional-mounts/_index.md b/content/os/v1.x/en/installation/storage/additional-mounts/_index.md index e568596e3d1..77159ad30ec 100644 --- a/content/os/v1.x/en/installation/storage/additional-mounts/_index.md +++ b/content/os/v1.x/en/installation/storage/additional-mounts/_index.md @@ -3,7 +3,7 @@ title: Additional Mounts weight: 161 --- -Additional mounts can be specified as part of your [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config). These mounts are applied within the console container. Here's a simple example that mounts `/dev/vdb` to `/mnt/s`. +Additional mounts can be specified as part of your [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config). These mounts are applied within the console container. Here's a simple example that mounts `/dev/vdb` to `/mnt/s`. ```yaml #cloud-config diff --git a/content/os/v1.x/en/installation/storage/state-partition/_index.md b/content/os/v1.x/en/installation/storage/state-partition/_index.md index c16152c2771..27b125792b2 100644 --- a/content/os/v1.x/en/installation/storage/state-partition/_index.md +++ b/content/os/v1.x/en/installation/storage/state-partition/_index.md @@ -13,7 +13,7 @@ rancher: dev: LABEL=RANCHER_STATE ``` -For other labels such as `RANCHER_BOOT` and `RANCHER_OEM` and `RANCHER_SWAP`, please refer to [Custom partition layout]({{< baseurl >}}/os/v1.x/en/about/custom-partition-layout/). +For other labels such as `RANCHER_BOOT` and `RANCHER_OEM` and `RANCHER_SWAP`, please refer to [Custom partition layout]({{}}/os/v1.x/en/about/custom-partition-layout/). 
### Autoformat diff --git a/content/os/v1.x/en/installation/system-services/custom-system-services/_index.md b/content/os/v1.x/en/installation/system-services/custom-system-services/_index.md index ba63929e047..09e0024b158 100644 --- a/content/os/v1.x/en/installation/system-services/custom-system-services/_index.md +++ b/content/os/v1.x/en/installation/system-services/custom-system-services/_index.md @@ -3,7 +3,7 @@ title: Custom System Services weight: 141 --- -You can also create your own system service in [Docker Compose](https://docs.docker.com/compose/) format. After creating your own custom service, you can launch it in RancherOS in a couple of methods. The service could be directly added to the [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config), or a `docker-compose.yml` file could be saved at a http(s) url location or in a specific directory of RancherOS. +You can also create your own system service in [Docker Compose](https://docs.docker.com/compose/) format. After creating your own custom service, you can launch it in RancherOS in a couple of methods. The service could be directly added to the [cloud-config]({{}}/os/v1.x/en/installation/configuration/#cloud-config), or a `docker-compose.yml` file could be saved at a http(s) url location or in a specific directory of RancherOS. ### Launching Services through Cloud-Config diff --git a/content/os/v1.x/en/overview/_index.md b/content/os/v1.x/en/overview/_index.md index 264f130ef15..6c554718d10 100644 --- a/content/os/v1.x/en/overview/_index.md +++ b/content/os/v1.x/en/overview/_index.md @@ -25,11 +25,11 @@ VMWare | 1GB | 1280MB (rancheros.iso)
2048MB (ran GCE | 1GB | 1280MB AWS | 1GB | 1.7GB -You can adjust memory requirements by custom building RancherOS, please refer to [reduce-memory-requirements]({{< baseurl >}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/#reduce-memory-requirements) +You can adjust memory requirements by custom building RancherOS, please refer to [reduce-memory-requirements]({{}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/#reduce-memory-requirements) ### How RancherOS Works -Everything in RancherOS is a Docker container. We accomplish this by launching two instances of Docker. One is what we call **System Docker** and is the first process on the system. All other system services, like `ntpd`, `syslog`, and `console`, are running in Docker containers. System Docker replaces traditional init systems like `systemd` and is used to launch [additional system services]({{< baseurl >}}/os/v1.x/en/installation/system-services/adding-system-services/). +Everything in RancherOS is a Docker container. We accomplish this by launching two instances of Docker. One is what we call **System Docker** and is the first process on the system. All other system services, like `ntpd`, `syslog`, and `console`, are running in Docker containers. System Docker replaces traditional init systems like `systemd` and is used to launch [additional system services]({{}}/os/v1.x/en/installation/system-services/adding-system-services/). System Docker runs a special container called **Docker**, which is another Docker daemon responsible for managing all of the user’s containers. Any containers that you launch as a user from the console will run inside this Docker. This creates isolation from the System Docker containers and ensures that normal user commands don’t impact system services. 
@@ -39,7 +39,7 @@ System Docker runs a special container called **Docker**, which is another Docke ### Running RancherOS -To get started with RancherOS, head over to our [Quick Start Guide]({{< baseurl >}}/os/v1.x/en/quick-start-guide/). +To get started with RancherOS, head over to our [Quick Start Guide]({{}}/os/v1.x/en/quick-start-guide/). ### Latest Release diff --git a/content/os/v1.x/en/quick-start-guide/_index.md b/content/os/v1.x/en/quick-start-guide/_index.md index 7e01e0fc0a3..67403520766 100644 --- a/content/os/v1.x/en/quick-start-guide/_index.md +++ b/content/os/v1.x/en/quick-start-guide/_index.md @@ -3,7 +3,7 @@ title: Quick Start weight: 1 --- -If you have a specific RanchersOS machine requirements, please check out our [guides on running RancherOS]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/). With the rest of this guide, we'll start up a RancherOS using [Docker machine]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/workstation/docker-machine/) and show you some of what RancherOS can do. +If you have a specific RanchersOS machine requirements, please check out our [guides on running RancherOS]({{}}/os/v1.x/en/installation/running-rancheros/). With the rest of this guide, we'll start up a RancherOS using [Docker machine]({{}}/os/v1.x/en/installation/running-rancheros/workstation/docker-machine/) and show you some of what RancherOS can do. ### Launching RancherOS using Docker Machine @@ -120,7 +120,7 @@ $ sudo ros config get rancher.network.dns.nameservers ``` -When using the native Busybox console, any changes to the console will be lost after reboots, only changes to `/home` or `/opt` will be persistent. You can use the `ros console switch` command to switch to a [persistent console]({{< baseurl >}}/os/v1.x/en/installation/custom-builds/custom-console/#console-persistence) and replace the native Busybox console. 
For example, to switch to the Ubuntu console: +When using the native Busybox console, any changes to the console will be lost after reboots, only changes to `/home` or `/opt` will be persistent. You can use the `ros console switch` command to switch to a [persistent console]({{}}/os/v1.x/en/installation/custom-builds/custom-console/#console-persistence) and replace the native Busybox console. For example, to switch to the Ubuntu console: ``` $ sudo ros console switch ubuntu diff --git a/content/os/v1.x/en/upgrading/_index.md b/content/os/v1.x/en/upgrading/_index.md index beedfcdd821..4c3cf8e0be2 100644 --- a/content/os/v1.x/en/upgrading/_index.md +++ b/content/os/v1.x/en/upgrading/_index.md @@ -9,7 +9,7 @@ Since RancherOS is a kernel and initrd, the upgrade process is downloading a new Before upgrading to any version, please review the release notes on our [releases page](https://github.com/rancher/os/releases) in GitHub to review any updates in the release. -> **Note:** If you are using [`docker-machine`]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/workstation/docker-machine/) then you will not be able to upgrade your RancherOS version. You need to delete and re-create the machine. +> **Note:** If you are using [`docker-machine`]({{}}/os/v1.x/en/installation/running-rancheros/workstation/docker-machine/) then you will not be able to upgrade your RancherOS version. You need to delete and re-create the machine. ### Version Control @@ -64,7 +64,7 @@ $ sudo ros -v ros version v0.5.0 ``` -> **Note:** If you are booting from ISO and have not installed to disk, your upgrade will not be saved. You can view our guide to [installing to disk]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/server/install-to-disk/). +> **Note:** If you are booting from ISO and have not installed to disk, your upgrade will not be saved. You can view our guide to [installing to disk]({{}}/os/v1.x/en/installation/running-rancheros/server/install-to-disk/). 
#### Upgrading to a Specific Version @@ -114,7 +114,7 @@ ros version 0.4.4
-> **Note:** If you are using a [persistent console]({{< baseurl >}}/os/v1.x/en/installation/custom-builds/custom-console/#console-persistence) and in the current version's console, rolling back is not supported. For example, rolling back to v0.4.5 when using a v0.5.0 persistent console is not supported. +> **Note:** If you are using a [persistent console]({{}}/os/v1.x/en/installation/custom-builds/custom-console/#console-persistence) and in the current version's console, rolling back is not supported. For example, rolling back to v0.4.5 when using a v0.5.0 persistent console is not supported. ### Staging an Upgrade diff --git a/content/rancher/v2.x/en/admin-settings/_index.md b/content/rancher/v2.x/en/admin-settings/_index.md index e1dc6d52f2c..2242b4d3328 100644 --- a/content/rancher/v2.x/en/admin-settings/_index.md +++ b/content/rancher/v2.x/en/admin-settings/_index.md @@ -9,7 +9,7 @@ aliases: - /rancher/v2.x/en/admin-settings/log-in/ --- -After installation, the [system administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. +After installation, the [system administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. ## First Log In @@ -21,7 +21,7 @@ After you log into Rancher for the first time, Rancher will prompt you for a **R One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows to set up local users and/or connect to an external authentication provider. By connecting to an external authentication provider, you can leverage that provider's user and groups. 
-For more information how authentication works and how to configure each provider, see [Authentication]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/). +For more information how authentication works and how to configure each provider, see [Authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/). ## Authorization @@ -33,13 +33,13 @@ For more information how authorization works and how to customize roles, see [Ro _Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification, e.g. root privileges. If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message. -For more information how to create and use PSPs, see [Pod Security Policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/). +For more information how to create and use PSPs, see [Pod Security Policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/). ## Provisioning Drivers -Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. +Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. -For more information, see [Provisioning Drivers]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/). +For more information, see [Provisioning Drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/). 
## Adding Kubernetes Versions into Rancher @@ -47,9 +47,9 @@ _Available as of v2.3.0_ With this feature, you can upgrade to the latest version of Kubernetes as soon as it is released, without upgrading Rancher. This feature allows you to easily upgrade Kubernetes patch versions (i.e. `v1.15.X`), but not intended to upgrade Kubernetes minor versions (i.e. `v1.X.0`) as Kubernetes tends to deprecate or add APIs between minor versions. -The information that Rancher uses to provision [RKE clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) is now located in the Rancher Kubernetes Metadata. For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata/) +The information that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) is now located in the Rancher Kubernetes Metadata. For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata/) -Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). +Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata/). 
diff --git a/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md index f74e1e8b0ce..6b72f6752c4 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md @@ -7,11 +7,11 @@ aliases: If your organization uses Microsoft Active Directory as central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. -Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/openldap) integration. +Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/openldap) integration. > **Note:** > -> Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). +> Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). 
## Prerequisites @@ -196,4 +196,4 @@ In the same way, we can observe that the value in the **memberOf** attribute in ## Annex: Troubleshooting -If you are experiencing issues while testing the connection to the Active Directory server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{< baseurl >}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. +If you are experiencing issues while testing the connection to the Active Directory server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md index 272f3be784b..1400dfb6ce1 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md @@ -41,13 +41,11 @@ Before enabling Azure AD within Rancher, you must register Rancher with Azure. 1. Use search to open the **App registrations** service. - ![Open App Registrations]({{< baseurl >}}/img/rancher/search-app-registrations.png) + ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) 1. Click **New registrations** and complete the **Create** form. 
- ![New App Registration]({{< baseurl >}}/img/rancher/new-app-registration-1.png) - - ![New App Registration Register]({{< baseurl >}}/img/rancher/new-app-registration-2.png) + ![New App Registration]({{}}/img/rancher/new-app-registration.png) 1. Enter a **Name** (something like `Rancher`). @@ -67,7 +65,7 @@ From the Azure portal, create a client secret. Rancher will use this key to auth 1. Use search to open **App registrations** services. Then open the entry for Rancher that you created in the last procedure. - ![Open Rancher Registration]({{< baseurl >}}/img/rancher/open-rancher-app.png) + ![Open Rancher Registration]({{}}/img/rancher/open-rancher-app.png) 1. From the navigation pane on left, click **Certificates and Secrets**. @@ -94,7 +92,7 @@ Next, set API permissions for Rancher within Azure. 1. From the navigation pane on left, select **API permissions**. - ![Open API Permissions]({{< baseurl >}}/img/rancher/select-required-permissions-1.png) + ![Open Required Permissions]({{}}/img/rancher/select-required-permissions.png) 1. Click **Add a permission**. @@ -117,13 +115,35 @@ Next, set API permissions for Rancher within Azure. >**Note:** You must be signed in as an Azure administrator to successfully save your permission settings. -### 4. Copy Azure Application Data + +### 4. Add a Reply URL + +To use Azure AD with Rancher you must whitelist Rancher with Azure. You can complete this whitelisting by providing Azure with a reply URL for Rancher, which is your Rancher Server URL followed with a verification path. + + +1. From the **Setting** blade, select **Reply URLs**. + + ![Azure: Enter Reply URL]({{}}/img/rancher/enter-azure-reply-url.png) + +1. From the **Reply URLs** blade, enter the URL of your Rancher Server, appended with the verification path: `/verify-auth-azure`. + + >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). + +1. Click **Save**. 
+ +**Result:** Your reply URL is saved. + +>**Note:** It can take up to five minutes for this change to take effect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. + +### 5. Copy Azure Application Data As your final step in Azure, copy the data that you'll use to configure Rancher for Azure AD authentication and paste it into an empty text file. 1. Obtain your Rancher **Tenant ID**. - 1. From **App registrations** select the app configured above. + 1. Use search to open the **Azure Active Directory** service. + + ![Open Azure Active Directory]({{}}/img/rancher/search-azure-ad.png) 1. From the left navigation pane, open **Overview**. @@ -135,7 +155,7 @@ As your final step in Azure, copy the data that you'll use to configure Rancher 1. Use search to open **App registrations**. - ![Open App Registrations]({{< baseurl >}}/img/rancher/search-app-registrations.png) + ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) 1. Find the entry you created for Rancher. @@ -145,7 +165,7 @@ As your final step in Azure, copy the data that you'll use to configure Rancher 1. From **App registrations**, click **Endpoints**. - ![Click Endpoints]({{< baseurl >}}/img/rancher/click-endpoints.png) + ![Click Endpoints]({{}}/img/rancher/click-endpoints.png) 2. Copy the following endpoints to your clipboard and paste them into your [text file](#tip) (these values will be your Rancher endpoint values). diff --git a/content/rancher/v2.x/en/admin-settings/authentication/freeipa/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/freeipa/_index.md index 7158f26a6a8..37d8ba2e22b 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/freeipa/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/freeipa/_index.md @@ -13,7 +13,7 @@ If your organization uses FreeIPA for user authentication, you can configure Ran > >- You must have a [FreeIPA Server](https://www.freeipa.org/) configured. 
>- Create a service account in FreeIPA with `read-only` access. Rancher uses this account to verify group membership when a user makes a request using an API key. ->- Read [External Authentication Configuration and Principal Users]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). +>- Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). 1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). diff --git a/content/rancher/v2.x/en/admin-settings/authentication/github/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/github/_index.md index 55e505e26f3..9e2c4266c56 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/github/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/github/_index.md @@ -7,7 +7,7 @@ aliases: In environments using GitHub, you can configure Rancher to allow sign on using GitHub credentials. ->**Prerequisites:** Read [External Authentication Configuration and Principal Users]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). +>**Prerequisites:** Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). 1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). 
diff --git a/content/rancher/v2.x/en/admin-settings/authentication/keycloak/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/keycloak/_index.md index 5f38f1ec376..197e796fb62 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/keycloak/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/keycloak/_index.md @@ -65,7 +65,7 @@ If your organization uses Keycloak Identity Provider (IdP) for user authenticati ## Annex: Troubleshooting -If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{< baseurl >}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. +If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpoint the cause of the problem. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. 
### You are not redirected to Keycloak diff --git a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/_index.md index c79cf3e4087..6062bdb0288 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/_index.md @@ -27,10 +27,10 @@ If your organization uses Microsoft Active Directory Federation Services (AD FS) Setting up Microsoft AD FS with Rancher Server requires configuring AD FS on your Active Directory server, and configuring Rancher to utilize your AD FS server. The following pages serve as guides for setting up Microsoft AD FS authentication on your Rancher installation. -- [1 — Configuring Microsoft AD FS for Rancher]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) -- [2 — Configuring Rancher for Microsoft AD FS]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup) +- [1 — Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) +- [2 — Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup) {{< saml_caveats >}} -### [Next: Configuring Microsoft AD FS for Rancher]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) +### [Next: Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) diff --git a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md index 822a991e3e9..152834ec60c 100644 --- 
a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md @@ -79,4 +79,4 @@ https:///federationmetadata/2007-06/federationmetadata.xml **Result:** You've added Rancher as a relying trust party. Now you can configure Rancher to leverage AD. -### [Next: Configuring Rancher for Microsoft AD FS]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/) +### [Next: Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/) diff --git a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md index f5ba2a38b0e..d87510c66dd 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md @@ -4,7 +4,7 @@ weight: 1205 --- _Available as of v2.0.7_ -After you complete [Configuring Microsoft AD FS for Rancher]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. +After you complete [Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. 
>**Important Notes For Configuring Your AD FS Server:** > diff --git a/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md index bce05911aac..2a59301ad9b 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md @@ -18,7 +18,7 @@ If your organization uses LDAP for user authentication, you can configure Ranche > **Note:** > -> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). +> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). ## Prerequisites @@ -41,7 +41,7 @@ In the section titled `1. Configure an OpenLDAP server`, complete the fields w > **Note:** > -> If you are in doubt about the correct values to enter in the user/group Search Base configuration fields, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. +> If you are in doubt about the correct values to enter in the user/group Search Base configuration fields, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. 
**Table 1: OpenLDAP server parameters** @@ -65,7 +65,7 @@ Note that the attribute mappings configured in this section are used by Rancher > **Note:** > -> If you are unfamiliar with the user/group schema used in the OpenLDAP server, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. +> If you are unfamiliar with the user/group schema used in the OpenLDAP server, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. #### User Schema @@ -125,4 +125,4 @@ Once you have completed the configuration, proceed by testing the connection to ## Annex: Troubleshooting -If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{< baseurl >}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. +If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. 
diff --git a/content/rancher/v2.x/en/admin-settings/authentication/user-groups/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/user-groups/_index.md index 722452e5f63..d88eb423f82 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/user-groups/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/user-groups/_index.md @@ -5,11 +5,11 @@ weight: 1 Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When you configure an external authentication provider, users from that provider will be able to log in to your Rancher server. When a user logs in, the authentication provider will supply your Rancher server with a list of groups to which the user belongs. -Access to clusters, projects, multi-cluster apps, and global DNS providers and entries can be controlled by adding either individual users or groups to these resources. When you add a group to a resource, all users who are members of that group in the authentication provider, will be able to access the resource with the permissions that you've specified for the group. For more information on roles and permissions, see [Role Based Access Control]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/). +Access to clusters, projects, multi-cluster apps, and global DNS providers and entries can be controlled by adding either individual users or groups to these resources. When you add a group to a resource, all users who are members of that group in the authentication provider, will be able to access the resource with the permissions that you've specified for the group. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.x/en/admin-settings/rbac/). ## Managing Members -When adding a user or group to a resource, you can search for users or groups by beginning to type their name. 
The Rancher server will query the authentication provider to find users and groups that match what you've entered. Searching is limited to the authentication provider that you are currently logged in with. For example, if you've enabled GitHub authentication but are logged in using a [local]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/local/) user account, you will not be able to search for GitHub users or groups. +When adding a user or group to a resource, you can search for users or groups by beginning to type their name. The Rancher server will query the authentication provider to find users and groups that match what you've entered. Searching is limited to the authentication provider that you are currently logged in with. For example, if you've enabled GitHub authentication but are logged in using a [local]({{}}/rancher/v2.x/en/admin-settings/authentication/local/) user account, you will not be able to search for GitHub users or groups. All users, whether they are local users or from an authentication provider, can be viewed and managed. From the **Global** view, click on **Users**. diff --git a/content/rancher/v2.x/en/admin-settings/drivers/_index.md b/content/rancher/v2.x/en/admin-settings/drivers/_index.md index 63d202b1fad..11cc9d71582 100644 --- a/content/rancher/v2.x/en/admin-settings/drivers/_index.md +++ b/content/rancher/v2.x/en/admin-settings/drivers/_index.md @@ -3,7 +3,7 @@ title: Provisioning Drivers weight: 1140 --- -Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. 
+Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. ### Rancher Drivers @@ -18,19 +18,19 @@ There are two types of drivers within Rancher: _Available as of v2.2.0_ -Cluster drivers are used to provision [hosted Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/), such as GKE, EKS, AKS, etc.. The availability of which cluster driver to display when creating a cluster is defined based on the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters for hosted Kubernetes clusters. By default, Rancher is packaged with several existing cluster drivers, but you can also create custom cluster drivers to add to Rancher. +Cluster drivers are used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/), such as GKE, EKS, AKS, etc.. The availability of which cluster driver to display when creating a cluster is defined based on the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters for hosted Kubernetes clusters. By default, Rancher is packaged with several existing cluster drivers, but you can also create custom cluster drivers to add to Rancher. 
By default, Rancher has activated several hosted Kubernetes cloud providers including: -* [Amazon EKS]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/) -* [Google GKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke/) -* [Azure AKS]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks/) +* [Amazon EKS]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/) +* [Google GKE]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke/) +* [Azure AKS]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks/) There are several other hosted Kubernetes cloud providers that are disabled by default, but are packaged in Rancher: -* [Alibaba ACK]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/) -* [Huawei CCE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/) -* [Tencent]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/) +* [Alibaba ACK]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/) +* [Huawei CCE]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/) +* [Tencent]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/) ## Node Drivers @@ -40,7 +40,7 @@ If there are specific node drivers that you don't want to show to your users, yo Rancher supports several major cloud providers, but by default, these node drivers are active and available for deployment: -* [Amazon EC2]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) -* [Azure]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/) -* [Digital Ocean]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/) -* [vSphere]({{< baseurl 
>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/) +* [Amazon EC2]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) +* [Azure]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/) +* [Digital Ocean]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/) +* [vSphere]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/) diff --git a/content/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/_index.md b/content/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/_index.md index f578774e99f..ef92a737bd6 100644 --- a/content/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/_index.md +++ b/content/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/_index.md @@ -5,7 +5,7 @@ weight: 1 _Available as of v2.2.0_ -Cluster drivers are used to create clusters in a [hosted Kubernetes provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/), such as Google GKE. The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. +Cluster drivers are used to create clusters in a [hosted Kubernetes provider]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/), such as Google GKE. The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. 
If there are specific cluster drivers that you do not want to show your users, you may deactivate those cluster drivers within Rancher and they will not appear as an option for cluster creation. @@ -13,8 +13,8 @@ If there are specific cluster drivers that you do not want to show your users, y >**Prerequisites:** To create, edit, or delete cluster drivers, you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Cluster Drivers]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. +>- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) +>- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Cluster Drivers]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. 
## Activating/Deactivating Cluster Drivers diff --git a/content/rancher/v2.x/en/admin-settings/drivers/node-drivers/_index.md b/content/rancher/v2.x/en/admin-settings/drivers/node-drivers/_index.md index ba310504acc..5cf47fec86e 100644 --- a/content/rancher/v2.x/en/admin-settings/drivers/node-drivers/_index.md +++ b/content/rancher/v2.x/en/admin-settings/drivers/node-drivers/_index.md @@ -14,8 +14,8 @@ If there are specific node drivers that you don't want to show to your users, yo >**Prerequisites:** To create, edit, or delete drivers, you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Node Drivers]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. +>- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) +>- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Node Drivers]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. 
## Activating/Deactivating Node Drivers diff --git a/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md b/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md index 7e741e0ae1e..12616772261 100644 --- a/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md +++ b/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md @@ -73,10 +73,10 @@ Rancher ships with two default Pod Security Policies (PSPs): the `restricted` an You can add a Pod Security Policy (PSPs hereafter) in the following contexts: -- [When creating a cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/) -- [When editing an existing cluster]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters/) -- [When creating a project]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#creating-a-project/) -- [When editing an existing project]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/editing-projects/) +- [When creating a cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/) +- [When editing an existing cluster]({{}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters/) +- [When creating a project]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#creating-a-project/) +- [When editing an existing project]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/editing-projects/) > **Note:** We recommend adding PSPs during cluster and project creation instead of adding it to an existing one. 
diff --git a/content/rancher/v2.x/en/admin-settings/rbac/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/_index.md index ee8ef07a3e7..01b6eaacaa7 100644 --- a/content/rancher/v2.x/en/admin-settings/rbac/_index.md +++ b/content/rancher/v2.x/en/admin-settings/rbac/_index.md @@ -5,7 +5,7 @@ aliases: - /rancher/v2.x/en/concepts/global-configuration/users-permissions-roles/ --- -Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/), users can either be local or external. +Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/), users can either be local or external. After you configure external authentication, the users that display on the **Users** page changes. @@ -17,11 +17,11 @@ After you configure external authentication, the users that display on the **Use Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by _global permissions_, and _cluster and project roles_. -- [Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/): +- [Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/): Define user authorization outside the scope of any particular cluster. -- [Cluster and Project Roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/): +- [Cluster and Project Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/): Define user authorization inside the specific cluster or project where they are assigned the role. 
diff --git a/content/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/_index.md index 591d1e2365d..1a8f6db193d 100644 --- a/content/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/_index.md +++ b/content/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/_index.md @@ -140,7 +140,7 @@ By default, when a standard user creates a new cluster or project, they are auto There are two methods for changing default cluster/project roles: -- **Assign Custom Roles**: Create a [custom role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. +- **Assign Custom Roles**: Create a [custom role]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. - **Assign Individual Roles**: Configure multiple [cluster](#cluster-role-reference)/[project](#project-role-reference) roles as default for assignment to the creating user. @@ -148,7 +148,7 @@ There are two methods for changing default cluster/project roles: >**Note:** > ->- Although you can [lock]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/) a default role, the system still assigns the role to users who create a cluster/project. +>- Although you can [lock]({{}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/) a default role, the system still assigns the role to users who create a cluster/project. >- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. 
### Configuring Default Roles for Cluster and Project Creators diff --git a/content/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/_index.md index 895330c46ea..61993bb082d 100644 --- a/content/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/_index.md +++ b/content/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/_index.md @@ -22,8 +22,8 @@ This section covers the following topics: To complete the tasks on this page, one of the following permissions are required: - - [Administrator Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/). - - [Custom Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. + - [Administrator Global Permissions]({{<baseurl>}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/). + - [Custom Global Permissions]({{<baseurl>}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Roles]({{<baseurl>}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. ## Creating A Custom Role for a Cluster or Project @@ -68,7 +68,7 @@ The steps to add custom roles differ depending on the version of Rancher. 1. **Name** the role. -1. Choose whether to set the role to a status of [locked]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). +1. Choose whether to set the role to a status of [locked]({{<baseurl>}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). > **Note:** Locked roles cannot be assigned to users. 
diff --git a/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md index 91ea1123625..70c5c93778b 100644 --- a/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md +++ b/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md @@ -27,7 +27,7 @@ If you want to prevent a role from being assigned to users, you can set it to a You can lock roles in two contexts: -- When you're [adding a custom role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). +- When you're [adding a custom role]({{<baseurl>}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). - When you editing an existing role (see below). 1. From the **Global** view, select **Security** > **Roles**. diff --git a/content/rancher/v2.x/en/api/_index.md b/content/rancher/v2.x/en/api/_index.md index 97a0c5a6489..b2f9e84816d 100644 --- a/content/rancher/v2.x/en/api/_index.md +++ b/content/rancher/v2.x/en/api/_index.md @@ -5,11 +5,11 @@ weight: 7500 ## How to use the API -The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint as well as create [API keys]({{< baseurl >}}/rancher/v2.x/en/user-settings/api-keys/). +The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint as well as create [API keys]({{<baseurl>}}/rancher/v2.x/en/user-settings/api-keys/). ## Authentication -API requests must include authentication information. 
Authentication is done with HTTP basic authentication using [API Keys]({{< baseurl >}}/rancher/v2.x/en/user-settings/api-keys/). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. +API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys]({{<baseurl>}}/rancher/v2.x/en/user-settings/api-keys/). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles]({{<baseurl>}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page]({{<baseurl>}}/rancher/v2.x/en/api/api-tokens). diff --git a/content/rancher/v2.x/en/backups/_index.md b/content/rancher/v2.x/en/backups/_index.md index 0f2c8b5a106..d51850c3fe9 100644 --- a/content/rancher/v2.x/en/backups/_index.md +++ b/content/rancher/v2.x/en/backups/_index.md @@ -8,11 +8,11 @@ This section is devoted to protecting your data in a disaster scenario. To protect yourself from a disaster scenario, you should create backups on a regular basis. 
- - [Rancher Server Backups]({{< baseurl >}}/rancher/v2.x/en/backups/backups) - - [Backing up Rancher Launched Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/) + - [Rancher Server Backups]({{<baseurl>}}/rancher/v2.x/en/backups/backups) + - [Backing up Rancher Launched Kubernetes Clusters]({{<baseurl>}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/) In a disaster scenario, you can restore your `etcd` database by restoring a backup. - - [Rancher Server Restorations]({{< baseurl >}}/rancher/v2.x/en/backups/restorations) - - [Restoring Rancher Launched Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/restoring-etcd/) + - [Rancher Server Restorations]({{<baseurl>}}/rancher/v2.x/en/backups/restorations) + - [Restoring Rancher Launched Kubernetes Clusters]({{<baseurl>}}/rancher/v2.x/en/cluster-admin/restoring-etcd/) diff --git a/content/rancher/v2.x/en/backups/backups/_index.md b/content/rancher/v2.x/en/backups/backups/_index.md index 9ef3beb47d8..57e2150b516 100644 --- a/content/rancher/v2.x/en/backups/backups/_index.md +++ b/content/rancher/v2.x/en/backups/backups/_index.md @@ -10,4 +10,4 @@ This section contains information about how to create backups of your Rancher da - [Docker Install Backups](./single-node-backups/) - [Kubernetes Install Backups](./ha-backups/) -If you are looking to back up your [Rancher launched Kubernetes cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), please refer [here]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/). +If you are looking to back up your [Rancher launched Kubernetes cluster]({{<baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), please refer [here]({{<baseurl>}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/). 
diff --git a/content/rancher/v2.x/en/backups/backups/single-node-backups/_index.md b/content/rancher/v2.x/en/backups/backups/single-node-backups/_index.md index e86291f230f..bd2cb04d8d5 100644 --- a/content/rancher/v2.x/en/backups/backups/single-node-backups/_index.md +++ b/content/rancher/v2.x/en/backups/backups/single-node-backups/_index.md @@ -20,7 +20,7 @@ In this command, `` is a placeholder for the date that the data container Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the [procedure below](#creating-a-backup). Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{< baseurl >}}/img/rancher/placeholder-ref.png) +![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) | Placeholder | Example | Description | | -------------------------- | -------------------------- | --------------------------------------------------------- | @@ -68,4 +68,4 @@ This procedure creates a backup that you can restore if Rancher encounters a dis docker start ``` -**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/single-node-restoration) if you need to restore backup data. +**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs]({{}}/rancher/v2.x/en/backups/restorations/single-node-restoration) if you need to restore backup data. diff --git a/content/rancher/v2.x/en/backups/restorations/_index.md b/content/rancher/v2.x/en/backups/restorations/_index.md index 52fd8cab149..2f32ad1d9e2 100644 --- a/content/rancher/v2.x/en/backups/restorations/_index.md +++ b/content/rancher/v2.x/en/backups/restorations/_index.md @@ -4,7 +4,7 @@ weight: 1010 --- If you lose the data on your Rancher Server, you can restore it if you have backups stored in a safe location. 
-- [Restoring Backups—Docker Installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/single-node-restoration/) -- [Restoring Backups—Kubernetes installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/ha-restoration/) +- [Restoring Backups—Docker Installs]({{<baseurl>}}/rancher/v2.x/en/backups/restorations/single-node-restoration/) +- [Restoring Backups—Kubernetes installs]({{<baseurl>}}/rancher/v2.x/en/backups/restorations/ha-restoration/) -If you are looking to restore your [Rancher launched Kubernetes cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), please refer [here]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/restoring-etcd/). +If you are looking to restore your [Rancher launched Kubernetes cluster]({{<baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), please refer [here]({{<baseurl>}}/rancher/v2.x/en/cluster-admin/restoring-etcd/). diff --git a/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md b/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md index ac30f5113c5..5b8cfd3e0b6 100644 --- a/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md +++ b/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md @@ -8,7 +8,7 @@ aliases: This procedure describes how to use RKE to restore a snapshot of the Rancher Kubernetes cluster. The cluster snapshot will include Kubernetes configuration and the Rancher database and state. -Additionally, the `pki.bundle.tar.gz` file usage is no longer required as v0.2.0 has changed how the [Kubernetes cluster state is stored]({{< baseurl >}}/rke/latest/en/installation/#kubernetes-cluster-state). +Additionally, the `pki.bundle.tar.gz` file usage is no longer required as v0.2.0 has changed how the [Kubernetes cluster state is stored]({{<baseurl>}}/rke/latest/en/installation/#kubernetes-cluster-state). ## Restore Outline @@ -24,11 +24,11 @@ Additionally, the `pki.bundle.tar.gz` file usage is no longer required as v0.2.0 ### 1. 
Preparation -You will need [RKE]({{< baseurl >}}/rke/latest/en/installation/) and [kubectl]({{< baseurl >}}/rancher/v2.x/en/faq/kubectl/) CLI utilities installed. +You will need [RKE]({{}}/rke/latest/en/installation/) and [kubectl]({{}}/rancher/v2.x/en/faq/kubectl/) CLI utilities installed. -Prepare by creating 3 new nodes to be the target for the restored Rancher instance. See [Kubernetes Install]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/) for node requirements. +Prepare by creating 3 new nodes to be the target for the restored Rancher instance. See [Kubernetes Install]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/) for node requirements. -We recommend that you start with fresh nodes and a clean state. Alternatively you can clear Kubernetes and Rancher configurations from the existing nodes. This will destroy the data on these nodes. See [Node Cleanup]({{< baseurl >}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) for the procedure. +We recommend that you start with fresh nodes and a clean state. Alternatively you can clear Kubernetes and Rancher configurations from the existing nodes. This will destroy the data on these nodes. See [Node Cleanup]({{}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) for the procedure. > **IMPORTANT:** Before starting the restore make sure all the Kubernetes services on the old cluster nodes are stopped. We recommend powering off the nodes to be sure. @@ -135,8 +135,8 @@ S3 specific options are only available for RKE v0.2.0+. 
| `--bucket-name` value | Specify s3 bucket name | *| | `--folder` value | Specify s3 folder in the bucket name _Available as of v2.3.0_ | *| | `--region` value | Specify the s3 bucket location (optional) | *| -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | | -| `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | +| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) | | +| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) | ### 5. Bring Up the Cluster @@ -150,7 +150,7 @@ rke up --config ./rancher-cluster-restore.yml #### Testing the Cluster -Once RKE completes it will have created a credentials file in the local directory. Configure `kubectl` to use the `kube_config_rancher-cluster-restore.yml` credentials file and check on the state of the cluster. See [Installing and Configuring kubectl]({{< baseurl >}}/rancher/v2.x/en/faq/kubectl/#configuration) for details. +Once RKE completes it will have created a credentials file in the local directory. Configure `kubectl` to use the `kube_config_rancher-cluster-restore.yml` credentials file and check on the state of the cluster. See [Installing and Configuring kubectl]({{}}/rancher/v2.x/en/faq/kubectl/#configuration) for details. Your new cluster will take a few minutes to stabilize. Once you see the new "target node" transition to `Ready` and three old nodes in `NotReady` you are ready to continue. @@ -232,6 +232,6 @@ rke up --config ./rancher-cluster-restore.yml #### Finishing Up -Rancher should now be running and available to manage your Kubernetes clusters. 
Review the [recommended architecture]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/#recommended-architecture) for Kubernetes installations and update the endpoints for Rancher DNS or the Load Balancer that you built during Step 1 of the Kubernetes install ([1. Create Nodes and Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/#load-balancer)) to target the new cluster. Once the endpoints are updated, the agents on your managed clusters should automatically reconnect. This may take 10-15 minutes due to reconnect back off timeouts. +Rancher should now be running and available to manage your Kubernetes clusters. Review the [recommended architecture]({{}}/rancher/v2.x/en/installation/k8s-install/#recommended-architecture) for Kubernetes installations and update the endpoints for Rancher DNS or the Load Balancer that you built during Step 1 of the Kubernetes install ([1. Create Nodes and Load Balancer]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/#load-balancer)) to target the new cluster. Once the endpoints are updated, the agents on your managed clusters should automatically reconnect. This may take 10-15 minutes due to reconnect back off timeouts. > **IMPORTANT:** Remember to save your new RKE config (`rancher-cluster-restore.yml`) and `kubectl` credentials (`kube_config_rancher-cluster-restore.yml`) files in a safe place for future maintenance. diff --git a/content/rancher/v2.x/en/backups/restorations/single-node-restoration/_index.md b/content/rancher/v2.x/en/backups/restorations/single-node-restoration/_index.md index 9034877c2e4..aefa51a9da5 100644 --- a/content/rancher/v2.x/en/backups/restorations/single-node-restoration/_index.md +++ b/content/rancher/v2.x/en/backups/restorations/single-node-restoration/_index.md @@ -23,7 +23,7 @@ In this command, `` and `-` are e Cross reference the image and reference table below to learn how to obtain this placeholder data. 
Write down or copy this information before starting the [procedure below](#creating-a-backup). Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{< baseurl >}}/img/rancher/placeholder-ref.png) +![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) | Placeholder | Example | Description | | -------------------------- | -------------------------- | --------------------------------------------------------- | @@ -37,7 +37,7 @@ You can obtain `` and `` by loggi ## Restoring Backups -Using a [backup]({{< baseurl >}}/rancher/v2.x/en/backups/backups/single-node-backups/) that you created earlier, restore Rancher to its last known healthy state. +Using a [backup]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/) that you created earlier, restore Rancher to its last known healthy state. 1. Using a remote Terminal connection, log into the node running your Rancher Server. @@ -46,9 +46,9 @@ Using a [backup]({{< baseurl >}}/rancher/v2.x/en/backups/backups/single-node-bac ``` docker stop ``` -1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs]({{< baseurl >}}/rancher/v2.x/en/backups/backups/single-node-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. +1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - If you followed the naming convention we suggested in [Creating Backups—Docker Installs]({{< baseurl >}}/rancher/v2.x/en/backups/backups/single-node-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. 
+ If you followed the naming convention we suggested in [Creating Backups—Docker Installs]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. 1. Enter the following command to delete your current state data and replace it with your backup data, replacing the [placeholders](#before-you-start). Don't forget to close the quotes. diff --git a/content/rancher/v2.x/en/best-practices/_index.md b/content/rancher/v2.x/en/best-practices/_index.md index c5aad4106e3..41bbb4cc9c4 100644 --- a/content/rancher/v2.x/en/best-practices/_index.md +++ b/content/rancher/v2.x/en/best-practices/_index.md @@ -11,10 +11,10 @@ Use the navigation bar on the left to find the current best practices for managi For more guidance on best practices, you can consult these resources: -- [Rancher Docs]({{< baseurl >}}) - - [Monitoring]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) - - [Backups and Disaster Recovery]({{< baseurl >}}/rancher/v2.x/en/backups/) - - [Security]({{< baseurl >}}/rancher/v2.x/en/security/) +- [Rancher Docs]({{}}) + - [Monitoring]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) + - [Backups and Disaster Recovery]({{}}/rancher/v2.x/en/backups/) + - [Security]({{}}/rancher/v2.x/en/security/) - [Rancher Blog](https://rancher.com/blog/) - [Articles about best practices on the Rancher blog](https://rancher.com/tags/best-practices/) - [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) diff --git a/content/rancher/v2.x/en/best-practices/deployment-types/_index.md b/content/rancher/v2.x/en/best-practices/deployment-types/_index.md index 82d177cbcaf..ff493e7fbf2 100644 --- a/content/rancher/v2.x/en/best-practices/deployment-types/_index.md +++ b/content/rancher/v2.x/en/best-practices/deployment-types/_index.md @@ -28,11 +28,11 @@ For best performance, run all three of your nodes in the same geographic 
datacen It's strongly recommended to have a "staging" or "pre-production" environment of the Kubernetes cluster that Rancher runs on. This environment should mirror your production environment as closely as possible in terms of software and hardware configuration. ### Monitor Your Clusters to Plan Capacity -The Rancher server's Kubernetes cluster should run within the [system and hardware requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. +The Rancher server's Kubernetes cluster should run within the [system and hardware requirements]({{}}/rancher/v2.x/en/installation/requirements/) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. However, metrics-driven capacity planning analysis should be the ultimate guidance for scaling Rancher, because the published requirements take into account a variety of workload types. Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution, and Grafana, which lets you visualize the metrics from Prometheus. -After you [enable monitoring]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) in the cluster, you can set up [a notification channel]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) and [cluster alerts]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/alerts/) to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. 
+After you [enable monitoring]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) in the cluster, you can set up [a notification channel]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) and [cluster alerts]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. diff --git a/content/rancher/v2.x/en/best-practices/management/_index.md b/content/rancher/v2.x/en/best-practices/management/_index.md index fe7f5f75bf4..4fd202dc1ec 100644 --- a/content/rancher/v2.x/en/best-practices/management/_index.md +++ b/content/rancher/v2.x/en/best-practices/management/_index.md @@ -10,7 +10,7 @@ Rancher allows you to set up numerous combinations of configurations. Some confi These tips can help you solve problems before they happen. ### Run Rancher on a Supported OS and Supported Docker Version -Rancher is container-based and can potentially run on any Linux-based operating system. However, only operating systems listed in the [requirements documentation]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/) should be used for running Rancher, along with a supported version of Docker. These versions have been most thoroughly tested and can be properly supported by the Rancher Support team. +Rancher is container-based and can potentially run on any Linux-based operating system. However, only operating systems listed in the [requirements documentation]({{}}/rancher/v2.x/en/installation/requirements/) should be used for running Rancher, along with a supported version of Docker. These versions have been most thoroughly tested and can be properly supported by the Rancher Support team. ### Upgrade Your Kubernetes Version Keep your Kubernetes cluster up to date with a recent and supported version. 
Typically the Kubernetes community will support the current version and previous three minor releases (for example, 1.14.x, 1.13.x, 1.12.x, and 1.11.x). After a new version is released, the third-oldest supported version reaches EOL (End of Life) status. Running on an EOL release can be a risk if a security issues are found and patches are not available. The community typically makes minor releases every quarter (every three months). @@ -29,11 +29,11 @@ Rancher [maintains a Terraform provider](https://rancher.com/blog/2019/rancher-2 All upgrades, both patch and feature upgrades, should be first tested on a staging environment before production is upgraded. The more closely the staging environment mirrors production, the higher chance your production upgrade will be successful. ### Renew Certificates Before they Expire -Multiple people in your organization should set up calendar reminders for certificate renewal. Consider renewing the certificate two weeks to one month in advance. If you have multiple certificates to track, consider using [monitoring and alerting mechanisms]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/) to track certificate expiration. +Multiple people in your organization should set up calendar reminders for certificate renewal. Consider renewing the certificate two weeks to one month in advance. If you have multiple certificates to track, consider using [monitoring and alerting mechanisms]({{}}/rancher/v2.x/en/cluster-admin/tools/) to track certificate expiration. Rancher-provisioned Kubernetes clusters will use certificates that expire in one year. Clusters provisioned by other means may have a longer or shorter expiration. -Certificates can be renewed for Rancher-provisioned clusters [through the Rancher user interface]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/certificate-rotation/). 
+Certificates can be renewed for Rancher-provisioned clusters [through the Rancher user interface]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/). ### Enable Recurring Snapshots for Backing up and Restoring the Cluster Make sure etcd recurring snapshots are enabled. Extend the snapshot retention to a period of time that meets your business needs. In the event of a catastrophic failure or deletion of data, this may be your only recourse for recovery. For details about configuring snapshots, refer to the [RKE documentation]({{}}/rke/latest/en/etcd-snapshots/) or the [Rancher documentation on backups]({{}}/rancher/v2.x/en/backups/). @@ -78,13 +78,13 @@ Provision 3 or 5 etcd nodes. Etcd requires a quorum to determine a leader by the Provision two or more control plane nodes. Some control plane components, such as the `kube-apiserver`, run in [active-active](https://www.jscape.com/blog/active-active-vs-active-passive-high-availability-cluster) mode and will give you more scalability. Other components such as kube-scheduler and kube-controller run in active-passive mode (leader elect) and give you more fault tolerance. ### Monitor Your Cluster -Closely monitor and scale your nodes as needed. You should [enable cluster monitoring]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and use the Prometheus metrics and Grafana visualization options as a starting point. +Closely monitor and scale your nodes as needed. You should [enable cluster monitoring]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and use the Prometheus metrics and Grafana visualization options as a starting point. # Tips for Security Below are some basic tips for increasing security in Rancher. 
For more detailed information about securing your cluster, you can refer to these resources: -- Rancher's [security documentation and Kubernetes cluster hardening guide]({{< baseurl >}}/rancher/v2.x/en/security/) +- Rancher's [security documentation and Kubernetes cluster hardening guide]({{}}/rancher/v2.x/en/security/) - [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) ### Update Rancher with Security Patches diff --git a/content/rancher/v2.x/en/catalog/_index.md b/content/rancher/v2.x/en/catalog/_index.md index 84b297cddd1..2869abf5a3b 100644 --- a/content/rancher/v2.x/en/catalog/_index.md +++ b/content/rancher/v2.x/en/catalog/_index.md @@ -61,7 +61,7 @@ Within Rancher, there are default catalogs packaged as part of Rancher. These ca The Library Catalog includes charts curated by Rancher. Rancher stores charts in a Git repository to expedite the fetch and update of charts. - This catalog features Rancher Charts, which include some [notable advantages]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/#chart-types) over native Helm charts. + This catalog features Rancher Charts, which include some [notable advantages]({{}}/rancher/v2.x/en/catalog/custom/#chart-types) over native Helm charts. - **Helm Stable** @@ -94,11 +94,11 @@ _Available as of v2.2.0_ In Rancher v2.2.0, you can add private catalog repositories using credentials like Username and Password. You may also want to use the OAuth token if your Git or Helm repository server support that. -[Read More About Adding Private Git/Helm Catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/#private-repositories) +[Read More About Adding Private Git/Helm Catalogs]({{}}/rancher/v2.x/en/catalog/custom/#private-repositories) @@ -144,15 +144,15 @@ By creating a customized repository with added files, Rancher improves on Helm r There are two types of catalogs in Rancher. 
Learn more about each type: -* [Built-in Global Catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/built-in/) -* [Custom Catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/) +* [Built-in Global Catalogs]({{}}/rancher/v2.x/en/catalog/built-in/) +* [Custom Catalogs]({{}}/rancher/v2.x/en/catalog/custom/) ### Apps In Rancher, applications are deployed from the templates in a catalog. Rancher supports two types of applications: -* [Multi-cluster applications]({{< baseurl >}}/rancher/v2.x/en/catalog/multi-cluster-apps/) -* [Applications deployed in a specific Project]({{< baseurl >}}/rancher/v2.x/en/catalog/apps) +* [Multi-cluster applications]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) +* [Applications deployed in a specific Project]({{}}/rancher/v2.x/en/catalog/apps) ### Global DNS @@ -160,7 +160,7 @@ _Available as v2.2.0_ When creating applications that span multiple Kubernetes clusters, a Global DNS entry can be created to route traffic to the endpoints in all of the different clusters. An external DNS server will need be programmed to assign a fully qualified domain name (a.k.a FQDN) to your application. Rancher will use the FQDN you provide and the IP addresses where your application is running to program the DNS. Rancher will gather endpoints from all the Kubernetes clusters running your application and program the DNS. -For more information on how to use this feature, see [Global DNS]({{< baseurl >}}/rancher/v2.x/en/catalog/globaldns/). +For more information on how to use this feature, see [Global DNS]({{}}/rancher/v2.x/en/catalog/globaldns/). 
### Chart Compatibility with Rancher diff --git a/content/rancher/v2.x/en/catalog/apps/_index.md b/content/rancher/v2.x/en/catalog/apps/_index.md index 04d509449fc..c5b0b058bc7 100644 --- a/content/rancher/v2.x/en/catalog/apps/_index.md +++ b/content/rancher/v2.x/en/catalog/apps/_index.md @@ -3,9 +3,9 @@ title: Apps in a Project weight: 5005 --- -Within a project, when you want to deploy applications from catalogs, the applications available in your project will be based on the [scope of the catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/#catalog-scope). +Within a project, when you want to deploy applications from catalogs, the applications available in your project will be based on the [scope of the catalogs]({{}}/rancher/v2.x/en/catalog/#catalog-scope). -If your application is using ingresses, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{< baseurl >}}/rancher/v2.x/en/catalog/globaldns/). +If your application is using ingresses, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{}}/rancher/v2.x/en/catalog/globaldns/). ## Prerequisites @@ -16,7 +16,7 @@ To create a multi-cluster app in Rancher, you must have at least one of the foll ## Launching Catalog Applications -After you've either enabled the [built-in global catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/built-in/) or [added your own custom catalog]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/adding), you can start launching catalog applications. +After you've either enabled the [built-in global catalogs]({{}}/rancher/v2.x/en/catalog/built-in/) or [added your own custom catalog]({{}}/rancher/v2.x/en/catalog/custom/adding), you can start launching catalog applications. 1. From the **Global** view, navigate to your project that you want to start deploying applications. 
@@ -57,7 +57,7 @@ If the Helm chart that you are deploying contains a `questions.yml` file, Ranche #### Key Value Pairs for Native Helm Charts -For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/#custom-helm-chart-repository)), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. +For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{}}/rancher/v2.x/en/catalog/custom/#custom-helm-chart-repository)), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. {{% /tab %}} {{% tab "Editing YAML Files" %}} diff --git a/content/rancher/v2.x/en/catalog/built-in/_index.md b/content/rancher/v2.x/en/catalog/built-in/_index.md index 54a1268c88f..fa81d40f40a 100644 --- a/content/rancher/v2.x/en/catalog/built-in/_index.md +++ b/content/rancher/v2.x/en/catalog/built-in/_index.md @@ -5,14 +5,14 @@ aliases: - /rancher/v2.x/en/tasks/global-configuration/catalog/enabling-default-catalogs/ --- -There are default [global catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/#global-catalogs) packaged as part of Rancher. +There are default [global catalogs]({{}}/rancher/v2.x/en/catalog/#global-catalogs) packaged as part of Rancher. 
## Managing Built-in Global Catalogs ->**Prerequisites:** In order to manage the built-in catalogs or [manage global catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/adding/#adding-global-catalogs), you need _one_ of the following permissions: +>**Prerequisites:** In order to manage the built-in catalogs or [manage global catalogs]({{}}/rancher/v2.x/en/catalog/custom/adding/#adding-global-catalogs), you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. +>- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) +>- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions prior to v2.2.0, you can select **Catalogs** directly in the navigation bar. @@ -22,7 +22,7 @@ There are default [global catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/#glo The Library Catalog includes charts curated by Rancher. Rancher stores charts in a Git repository to expedite the fetch and update of charts. In Rancher 2.x, only global catalogs are supported. Support for cluster-level and project-level charts will be added in the future. - This catalog features Rancher Charts, which include some [notable advantages]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/#chart-types) over native Helm charts. 
+ This catalog features Rancher Charts, which include some [notable advantages]({{}}/rancher/v2.x/en/catalog/custom/#chart-types) over native Helm charts. - **Helm Stable** diff --git a/content/rancher/v2.x/en/catalog/custom/_index.md b/content/rancher/v2.x/en/catalog/custom/_index.md index 771097c6ec6..67e79a87ab2 100644 --- a/content/rancher/v2.x/en/catalog/custom/_index.md +++ b/content/rancher/v2.x/en/catalog/custom/_index.md @@ -5,7 +5,7 @@ aliases: --- -Any user can [create custom catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/creating/) to add into Rancher. Besides the content of the catalog, users must ensure their catalogs are able to be added into Rancher. +Any user can [create custom catalogs]({{}}/rancher/v2.x/en/catalog/custom/creating/) to add into Rancher. Besides the content of the catalog, users must ensure their catalogs are able to be added into Rancher. ## Types of Repositories @@ -28,7 +28,7 @@ In Rancher, you can add the custom Helm chart repository with only a catalog nam ## Catalog Fields -When [adding your catalog]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/adding/) to Rancher, you'll provide the following information: +When [adding your catalog]({{}}/rancher/v2.x/en/catalog/custom/adding/) to Rancher, you'll provide the following information: | Variable | Description | @@ -48,7 +48,7 @@ Private Git or Helm chart repositories can be added into Rancher using either cr ### Using Username and Password -1. When [adding the catalog]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/adding/), select the **Use private catalog** checkbox. +1. When [adding the catalog]({{}}/rancher/v2.x/en/catalog/custom/adding/), select the **Use private catalog** checkbox. 2. Provide the `Username` and `Password` for your Git or Helm repository. @@ -59,6 +59,6 @@ Read [using Git over HTTPS and OAuth](https://github.blog/2012-09-21-easier-buil 1. 
Create an [OAuth token](https://github.com/settings/tokens) with `repo` permission selected, and click **Generate token**. -2. When [adding the catalog]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/adding/), select the **Use private catalog** checkbox. +2. When [adding the catalog]({{}}/rancher/v2.x/en/catalog/custom/adding/), select the **Use private catalog** checkbox. 3. For `Username`, provide the Git generated OAuth token. For `Password`, enter `x-oauth-basic`. diff --git a/content/rancher/v2.x/en/catalog/custom/adding/_index.md b/content/rancher/v2.x/en/catalog/custom/adding/_index.md index f3813c01404..f32ad5eeee8 100644 --- a/content/rancher/v2.x/en/catalog/custom/adding/_index.md +++ b/content/rancher/v2.x/en/catalog/custom/adding/_index.md @@ -5,20 +5,20 @@ aliases: - /rancher/v2.x/en/tasks/global-configuration/catalog/adding-custom-catalogs/ --- -[Custom catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/) can be added into Rancher at any [scope of Rancher]({{< baseurl >}}/rancher/v2.x/en/catalog/#catalog-scope). +[Custom catalogs]({{}}/rancher/v2.x/en/catalog/custom/) can be added into Rancher at any [scope of Rancher]({{}}/rancher/v2.x/en/catalog/#catalog-scope). ## Adding Global Catalogs ->**Prerequisites:** In order to manage the [built-in catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/built-in/) or manage global catalogs, you need _one_ of the following permissions: +>**Prerequisites:** In order to manage the [built-in catalogs]({{}}/rancher/v2.x/en/catalog/built-in/) or manage global catalogs, you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. 
+>- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) +>- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions prior to v2.2.0, you can select **Catalogs** directly in the navigation bar. 2. Click **Add Catalog**. 3. Complete the form and click **Create**. - **Result**: Your custom global catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [multi-cluster apps]({{< baseurl >}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or [applications in any project]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/) from this catalog. + **Result**: Your custom global catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [multi-cluster apps]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or [applications in any project]({{}}/rancher/v2.x/en/catalog/apps/) from this catalog. ## Adding Cluster Catalogs @@ -26,9 +26,9 @@ _Available as of v2.2.0_ >**Prerequisites:** In order to manage cluster scoped catalogs, you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Cluster Owner Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) ->- [Custom Cluster Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Cluster Catalogs]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-role-reference) role assigned. 
+>- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) +>- [Cluster Owner Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) +>- [Custom Cluster Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Cluster Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-role-reference) role assigned. 1. From the **Global** view, navigate to your cluster that you want to start adding custom catalogs. 2. Choose the **Tools > Catalogs** in the navigation bar. @@ -36,7 +36,7 @@ _Available as of v2.2.0_ 3. Complete the form. By default, the form will provide the ability to select `Scope` of the catalog. When you have added a catalog from the **Cluster** scope, it is defaulted to `Cluster`. 5. Click **Create**. -**Result**: Your custom cluster catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in any project in that cluster]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/) from this catalog. +**Result**: Your custom cluster catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in any project in that cluster]({{}}/rancher/v2.x/en/catalog/apps/) from this catalog. 
## Adding Project Level Catalogs @@ -44,10 +44,10 @@ _Available as of v2.2.0_ >**Prerequisites:** In order to manage project scoped catalogs, you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Cluster Owner Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) ->- [Project Owner Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) ->- [Custom Project Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Project Catalogs]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) role assigned. +>- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) +>- [Cluster Owner Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) +>- [Project Owner Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) +>- [Custom Project Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Project Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) role assigned. 1. From the **Global** view, navigate to your project that you want to start adding custom catalogs. 2. Choose the **Tools > Catalogs** in the navigation bar. @@ -55,4 +55,4 @@ _Available as of v2.2.0_ 3. Complete the form. By default, the form will provide the ability to select `Scope` of the catalog. When you have added a catalog from the **Project** scope, it is defaulted to `Cluster`. 5. Click **Create**. -**Result**: Your custom project catalog is added to Rancher. 
Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in that project]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/) from this catalog. +**Result**: Your custom project catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in that project]({{}}/rancher/v2.x/en/catalog/apps/) from this catalog. diff --git a/content/rancher/v2.x/en/catalog/custom/creating/_index.md b/content/rancher/v2.x/en/catalog/custom/creating/_index.md index bc1ed5e919d..36c1e41825e 100644 --- a/content/rancher/v2.x/en/catalog/custom/creating/_index.md +++ b/content/rancher/v2.x/en/catalog/custom/creating/_index.md @@ -61,7 +61,7 @@ Before you create your own custom catalog, you should have a basic understanding
Rancher Chart with app-readme.md (left) vs. Helm Chart without (right)
- ![app-readme.md]({{< baseurl >}}/img/rancher/app-readme.png) + ![app-readme.md]({{<baseurl>}}/img/rancher/app-readme.png) - `questions.yml` @@ -70,7 +70,7 @@ Before you create your own custom catalog, you should have a basic understanding
Rancher Chart with questions.yml (left) vs. Helm Chart without (right)
- ![questions.yml]({{< baseurl >}}/img/rancher/questions.png) + ![questions.yml]({{<baseurl>}}/img/rancher/questions.png) ### Questions.yml diff --git a/content/rancher/v2.x/en/catalog/globaldns/_index.md b/content/rancher/v2.x/en/catalog/globaldns/_index.md index ffa841ae509..463cbebc6c9 100644 --- a/content/rancher/v2.x/en/catalog/globaldns/_index.md +++ b/content/rancher/v2.x/en/catalog/globaldns/_index.md @@ -23,11 +23,11 @@ The following table lists the first version of Rancher each provider debuted. ## Global DNS Entries -For each application that you want to route traffic to, you will need to create a Global DNS Entry. This entry will use a fully qualified domain name (a.k.a FQDN) from a global DNS provider to target applications. The applications can either resolve to a single [multi-cluster application]({{< baseurl >}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or to specific projects. You must [add specific annotation labels](#adding-annotations-to-ingresses-to-program-the-external-dns) to the ingresses in order for traffic to be routed correctly to the applications. Without this annotation, the programming for the DNS entry will not work. +For each application that you want to route traffic to, you will need to create a Global DNS Entry. This entry will use a fully qualified domain name (a.k.a FQDN) from a global DNS provider to target applications. The applications can either resolve to a single [multi-cluster application]({{<baseurl>}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or to specific projects. You must [add specific annotation labels](#adding-annotations-to-ingresses-to-program-the-external-dns) to the ingresses in order for traffic to be routed correctly to the applications. Without this annotation, the programming for the DNS entry will not work. 
## Permissions for Global DNS Providers/Entries -By default, only [global administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) and the creator of the Global DNS provider or Global DNS entry have access to use, edit and delete them. When creating the provider or entry, the creator can add additional users in order for those users to access and manage them. By default, these members will get `Owner` role to manage them. +By default, only [global administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) and the creator of the Global DNS provider or Global DNS entry have access to use, edit and delete them. When creating the provider or entry, the creator can add additional users in order for those users to access and manage them. By default, these members will get `Owner` role to manage them. ## Setting up Global DNS for Applications @@ -63,7 +63,7 @@ By default, only [global administrators]({{< baseurl >}}/rancher/v2.x/en/admin-s >**Notes:** > ->- Alibaba Cloud SDK uses TZ data. It needs to be present on `/usr/share/zoneinfo` path of the nodes running [`local` cluster]({{< baseurl >}}/rancher/v2.x/en/installation/options/chart-options/#import-local-cluster), and it is mounted to the external DNS pods. If it is not available on the nodes, please follow the [instruction](https://www.ietf.org/timezones/tzdb-2018f/tz-link.html) to prepare it. +>- Alibaba Cloud SDK uses TZ data. It needs to be present on `/usr/share/zoneinfo` path of the nodes running [`local` cluster]({{}}/rancher/v2.x/en/installation/options/chart-options/#import-local-cluster), and it is mounted to the external DNS pods. If it is not available on the nodes, please follow the [instruction](https://www.ietf.org/timezones/tzdb-2018f/tz-link.html) to prepare it. >- Different versions of AliDNS have different allowable TTL range, where the default TTL for a global DNS entry may not be valid. 
Please see the [reference](https://www.alibabacloud.com/help/doc-detail/34338.htm) before adding an AliDNS entry. {{% /accordion %}} @@ -73,7 +73,7 @@ By default, only [global administrators]({{< baseurl >}}/rancher/v2.x/en/admin-s 1. Click on **Add DNS Entry**. 1. Enter the **FQDN** you wish to program on the external DNS. 1. Select a Global DNS **Provider** from the list. -1. Select if this DNS entry will be for a [multi-cluster application]({{< baseurl >}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or for workloads in different [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). You will need to ensure that [annotations are added to any ingresses](#adding-annotations-to-ingresses-to-program-the-external-dns) for the applications that you want to target. +1. Select if this DNS entry will be for a [multi-cluster application]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or for workloads in different [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). You will need to ensure that [annotations are added to any ingresses](#adding-annotations-to-ingresses-to-program-the-external-dns) for the applications that you want to target. 1. Configure the **DNS TTL** value in seconds. By default, it will be 300 seconds. 1. Under **Member Access**, search for any users that you want to have the ability to manage this Global DNS entry. @@ -85,11 +85,11 @@ In order for Global DNS entries to be programmed, you will need to add a specifi 1. In order for the DNS to be programmed, the following requirements must be met: * The ingress routing rule must be set to use a `hostname` that matches the FQDN of the Global DNS entry. * The ingress must have an annotation (`rancher.io/globalDNS.hostname`) and the value of this annotation should match the FQDN of the Global DNS entry. -1. 
Once the ingress in your [multi-cluster application]({{< baseurl >}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or in your target projects are in `active` state, the FQDN will be programmed on the external DNS against the Ingress IP addresses. +1. Once the ingress in your [multi-cluster application]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or in your target projects are in `active` state, the FQDN will be programmed on the external DNS against the Ingress IP addresses. ## Editing a Global DNS Provider -The [global administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), creator of the Global DNS provider and any users added as `members` to a Global DNS provider, have _owner_ access to that provider. Any members can edit the following fields: +The [global administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), creator of the Global DNS provider and any users added as `members` to a Global DNS provider, have _owner_ access to that provider. Any members can edit the following fields: - Root Domain - Access Key & Secret Key @@ -101,7 +101,7 @@ The [global administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/ ## Editing a Global DNS Entry -The [global administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), creator of the Global DNS entry and any users added as `members` to a Global DNS entry, have _owner_ access to that DNS entry. Any members can edit the following fields: +The [global administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), creator of the Global DNS entry and any users added as `members` to a Global DNS entry, have _owner_ access to that DNS entry. 
Any members can edit the following fields: - FQDN - Global DNS Provider diff --git a/content/rancher/v2.x/en/catalog/multi-cluster-apps/_index.md b/content/rancher/v2.x/en/catalog/multi-cluster-apps/_index.md index e1ec64524d8..282835a53e4 100644 --- a/content/rancher/v2.x/en/catalog/multi-cluster-apps/_index.md +++ b/content/rancher/v2.x/en/catalog/multi-cluster-apps/_index.md @@ -6,9 +6,9 @@ _Available as of v2.2.0_ Typically, most applications are deployed on a single Kubernetes cluster, but there will be times you might want to deploy multiple copies of the same application across different clusters and/or projects. In Rancher, a _multi-cluster application_, is an application deployed using a Helm chart across multiple clusters. With the ability to deploy the same application across multiple clusters, it avoids the repetition of the same action on each cluster, which could introduce user error during application configuration. With multi-cluster applications, you can customize to have the same configuration across all projects/clusters as well as have the ability to change the configuration based on your target project. Since multi-cluster application is considered a single application, it's easy to manage and maintain this application. -Any Helm charts from a [global catalog]({{< baseurl >}}/rancher/v2.x/en/catalog/#catalog-scope) can be used to deploy and manage multi-cluster applications. +Any Helm charts from a [global catalog]({{}}/rancher/v2.x/en/catalog/#catalog-scope) can be used to deploy and manage multi-cluster applications. -After creating a multi-cluster application, you can program a [Global DNS entry]({{< baseurl >}}/rancher/v2.x/en/catalog/globaldns/) to make it easier to access the application. +After creating a multi-cluster application, you can program a [Global DNS entry]({{}}/rancher/v2.x/en/catalog/globaldns/) to make it easier to access the application. 
# Prerequisites @@ -45,7 +45,7 @@ Rancher has divided the configuration option for the multi-cluster application i #### Targets -In the **Targets** section, select the [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects) that you want the application to be deployed in. The list of projects is based on what projects you have access to. For each project that you select, it will be added to the list, which shows the cluster name and project name that were selected. To remove a target project, click on **-**. +In the **Targets** section, select the [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects) that you want the application to be deployed in. The list of projects is based on what projects you have access to. For each project that you select, it will be added to the list, which shows the cluster name and project name that were selected. To remove a target project, click on **-**. #### Upgrades @@ -57,15 +57,15 @@ In the **Upgrades** section, select the upgrade strategy to use, when you decide #### Roles -In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/#launching-catalog-applications), that specific user's permissions are used for creation of all workloads/resources that is required by the app. +In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications]({{}}/rancher/v2.x/en/catalog/apps/#launching-catalog-applications), that specific user's permissions are used for creation of all workloads/resources that is required by the app. For multi-cluster applications, the application is deployed by a _system user_ and is assigned as the creator of all underlying resources. 
A _system user_ is used instead of the actual user due to the fact that the actual user could be removed from one of the target projects. If the actual user was removed from one of the projects, then that user would no longer be able to manage the application for the other projects. Rancher will let you select from two options for Roles, **Project** and **Cluster**. Rancher will allow creation using any of these roles based on the user's permissions. -- **Project** - This is the equivalent of a [project member]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _project member_ role, if the user is an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), a [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or a [project owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles), then the user is considered to have the appropriate level of permissions. +- **Project** - This is the equivalent of a [project member]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. 
While the user might not be explicitly granted the _project member_ role, if the user is an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), a [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or a [project owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles), then the user is considered to have the appropriate level of permissions. -- **Cluster** - This is the equivalent of a [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), then the user is considered to have the appropriate level of permissions. +- **Cluster** - This is the equivalent of a [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), then the user is considered to have the appropriate level of permissions. When launching the application, Rancher will confirm if you have these permissions in the target projects before launching the application. 
@@ -83,7 +83,7 @@ If the Helm chart that you are deploying contains a `questions.yml` file, Ranche #### Key Value Pairs for Native Helm Charts -For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/#custom-helm-chart-repository)), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. +For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{}}/rancher/v2.x/en/catalog/custom/#custom-helm-chart-repository)), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. ### Members diff --git a/content/rancher/v2.x/en/cli/_index.md b/content/rancher/v2.x/en/cli/_index.md index 0baa8f9da86..dd4d656fd19 100644 --- a/content/rancher/v2.x/en/cli/_index.md +++ b/content/rancher/v2.x/en/cli/_index.md @@ -16,8 +16,8 @@ The binary can be downloaded directly from the UI. The link can be found in the After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: -- Your [Rancher Server URL]({{< baseurl >}}/rancher/v2.x/en/admin-settings/server-url), which is used to connect to Rancher Server. -- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key]({{< baseurl >}}/rancher/v2.x/en/user-settings/api-keys/). +- Your [Rancher Server URL]({{}}/rancher/v2.x/en/admin-settings/server-url), which is used to connect to Rancher Server. +- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key]({{}}/rancher/v2.x/en/user-settings/api-keys/). 
### CLI Authentication @@ -31,7 +31,7 @@ If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to con ### Project Selection -Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. +Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. **Example: `./rancher context switch` Output** ``` @@ -57,17 +57,17 @@ The following commands are available for use in Rancher CLI. | Command | Result | |---|---| -| `apps, [app]` | Performs operations on catalog applications (i.e. individual [Helm charts](https://docs.helm.sh/developing_charts/) or [Rancher charts]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/#chart-directory-structure)). | -| `catalog` | Performs operations on [catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/). | -| `clusters, [cluster]` | Performs operations on your [clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/). | -| `context` | Switches between Rancher [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). For an example, see [Project Selection](#project-selection). 
| -| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) and [workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/)). Specify resources by name or ID. | +| `apps, [app]` | Performs operations on catalog applications (i.e. individual [Helm charts](https://docs.helm.sh/developing_charts/) or [Rancher charts]({{}}/rancher/v2.x/en/catalog/custom/#chart-directory-structure)). | +| `catalog` | Performs operations on [catalogs]({{}}/rancher/v2.x/en/catalog/). | +| `clusters, [cluster]` | Performs operations on your [clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). | +| `context` | Switches between Rancher [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). For an example, see [Project Selection](#project-selection). | +| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) and [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/)). Specify resources by name or ID. | | `kubectl` |Runs [kubectl commands](https://kubernetes.io/docs/reference/kubectl/overview/#operations). | | `login, [l]` | Logs into a Rancher Server. For an example, see [CLI Authentication](#cli-authentication). | -| `namespaces, [namespace]` |Performs operations on [namespaces]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). | -| `nodes, [node]` |Performs operations on [nodes]({{< baseurl >}}/rancher/v2.x/en/overview/architecture/#kubernetes). | -| `projects, [project]` | Performs operations on [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). 
| -| `ps` | Displays [workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads) in a project. | +| `namespaces, [namespace]` |Performs operations on [namespaces]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). | +| `nodes, [node]` |Performs operations on [nodes]({{}}/rancher/v2.x/en/overview/architecture/#kubernetes). | +| `projects, [project]` | Performs operations on [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). | +| `ps` | Displays [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads) in a project. | | `settings, [setting]` | Shows the current settings for your Rancher Server. | | `ssh` | Connects to one of your cluster nodes using the SSH protocol. | | `help, [h]` | Shows a list of commands or help for one command. | diff --git a/content/rancher/v2.x/en/cluster-admin/_index.md b/content/rancher/v2.x/en/cluster-admin/_index.md index 09397d9c2c7..ec93dd077f7 100644 --- a/content/rancher/v2.x/en/cluster-admin/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/_index.md @@ -21,22 +21,22 @@ Alternatively, you can switch between projects and clusters directly in the navi ## Managing Clusters in Rancher -After clusters have been [provisioned into Rancher]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/), [cluster owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. +After clusters have been [provisioned into Rancher]({{}}/rancher/v2.x/en/cluster-provisioning/), [cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. 
-| Action | [Rancher launched Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) | [Hosted Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) | [Imported Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters) | +| Action | [Rancher launched Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) | [Hosted Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) | [Imported Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters) | | --- | --- | ---| ---| -| [Using kubectl and a kubeconfig file to Access a Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) | * | * | * | -| [Adding Cluster Members]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/) | * | * | * | -| [Editing Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/editing-clusters/) | * | * | * | -| [Managing Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/nodes) | * | * | * | -| [Managing Persistent Volumes and Storage Classes]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) | * | * | * | -| [Managing Projects and Namespaces]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/projects-and-namespaces/) | * | * | * | +| [Using kubectl and a kubeconfig file to Access a Cluster]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) | * | * | * | +| [Adding Cluster Members]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/) | * | * | * | +| [Editing Clusters]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) | * | * | * | +| [Managing Nodes]({{}}/rancher/v2.x/en/cluster-admin/nodes) | * | * | * | +| [Managing Persistent Volumes and Storage Classes]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) | * | * | * | +| [Managing Projects and Namespaces]({{}}/rancher/v2.x/en/cluster-admin/projects-and-namespaces/) | * 
| * | * | | [Configuring Tools](#configuring-tools) | * | * | * | -| [Cloning Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/cloning-clusters/)| | * | * | -| [Ability to rotate certificates]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/certificate-rotation/) | * | | | -| [Ability to back up your Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/) | * | | | -| [Ability to recover and restore etcd]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/restoring-etcd/) | * | | | -| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/) | * | | | +| [Cloning Clusters]({{}}/rancher/v2.x/en/cluster-admin/cloning-clusters/)| | * | * | +| [Ability to rotate certificates]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/) | * | | | +| [Ability to back up your Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/) | * | | | +| [Ability to recover and restore etcd]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/) | * | | | +| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher]({{}}/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/) | * | | | ## Configuring Tools @@ -47,4 +47,4 @@ Rancher contains a variety of tools that aren't included in Kubernetes to assist - Logging - Monitoring -For more information, see [Tools]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/) +For more information, see [Tools]({{}}/rancher/v2.x/en/cluster-admin/tools/) diff --git a/content/rancher/v2.x/en/cluster-admin/backing-up-etcd/_index.md b/content/rancher/v2.x/en/cluster-admin/backing-up-etcd/_index.md index e4aa716ccbb..227c4cee080 100644 --- a/content/rancher/v2.x/en/cluster-admin/backing-up-etcd/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/backing-up-etcd/_index.md @@ -5,11 +5,11 @@ weight: 2045 _Available as of v2.2.0_ -In the Rancher UI, etcd backup and recovery for [Rancher 
launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) can be easily performed. Snapshots of the etcd database are taken and saved either [locally onto the etcd nodes](#local-backup-target) or to a [S3 compatible target](#s3-backup-target). The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. +In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) can be easily performed. Snapshots of the etcd database are taken and saved either [locally onto the etcd nodes](#local-backup-target) or to a [S3 compatible target](#s3-backup-target). The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. Rancher recommends configuring recurrent `etcd` snapshots for all production clusters. Additionally, one-time snapshots can easily be taken as well. ->**Note:** If you have any Rancher launched Kubernetes clusters that were created prior to v2.2.0, after upgrading Rancher, you must [edit the cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots prior to v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/restoring-etcd/). +>**Note:** If you have any Rancher launched Kubernetes clusters that were created prior to v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. 
Even if you were already creating snapshots prior to v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/). # Snapshot Creation Period and Retention Count @@ -17,7 +17,7 @@ Select how often you want recurring snapshots to be taken as well as how many sn ### Configuring Recurring Snapshots for the Cluster -By default, [Rancher launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) are configured to take recurring snapshots (saved to local disk). To protect against local disk failure, using the [S3 Target](#s3-backup-target) or replicating the path on disk is advised. +By default, [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) are configured to take recurring snapshots (saved to local disk). To protect against local disk failure, using the [S3 Target](#s3-backup-target) or replicating the path on disk is advised. During cluster provisioning or editing the cluster, the configuration for snapshots can be found in the advanced section for **Cluster Options**. Click on **Show advanced options**. @@ -49,7 +49,7 @@ Rancher supports two different backup targets: ### Local Backup Target -By default, the `local` backup target is selected. The benefits of this option is that there is no external configuration. Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. +By default, the `local` backup target is selected. The benefits of this option is that there is no external configuration. 
Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. #### Safe Timestamps diff --git a/content/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/_index.md b/content/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/_index.md index c5929f81b0c..8ce29334dda 100644 --- a/content/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/_index.md @@ -6,7 +6,7 @@ weight: 2055 This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. -When you use Rancher to [launch nodes for a cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher), resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. +When you use Rancher to [launch nodes for a cluster]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher), resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources are automatically cleaned, and the only action needed is to restart the node. When a node has become unreachable and the automatic cleanup process cannot be used, we describe the steps that need to be executed before the node can be added to a cluster again. 
@@ -24,10 +24,10 @@ When cleaning nodes provisioned using Rancher, the following components are dele | All resources create under the `management.cattle.io` API Group | ✓ | ✓ | ✓ | | | All CRDs created by Rancher v2.x | ✓ | ✓ | ✓ | | -[1]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/ +[1]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ +[2]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ +[3]: {{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ +[4]: {{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/ ## Removing a Node from a Cluster by Rancher UI @@ -59,7 +59,7 @@ After the imported cluster is detached from Rancher, the cluster's workloads wil {{% tab "By UI / API" %}} >**Warning:** This process will remove data from your cluster. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. -After you initiate the removal of an [imported cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#import-existing-cluster) using the Rancher UI (or API), the following events occur. +After you initiate the removal of an [imported cluster]({{}}/rancher/v2.x/en/cluster-provisioning/#import-existing-cluster) using the Rancher UI (or API), the following events occur. 1. Rancher creates a `serviceAccount` that it uses to remove the Rancher components from the cluster. 
This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the Rancher components. diff --git a/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md b/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md index b097e8b7b12..9e9335b1dd7 100644 --- a/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md @@ -13,16 +13,16 @@ Duplication of imported clusters, clusters in hosted Kubernetes providers, and c | Cluster Type | Cloneable? | |----------------------------------|---------------| -| [Nodes Hosted by Infrastructure Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) | ✓ | -| [Hosted Kubernetes Providers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) | | -| [Custom Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/) | | -| [Imported Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) | | +| [Nodes Hosted by Infrastructure Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) | ✓ | +| [Hosted Kubernetes Providers]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) | | +| [Custom Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/) | | +| [Imported Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) | | > **Warning:** During the process of duplicating a cluster, you will edit a config file full of cluster settings. However, we recommend editing only values explicitly listed in this document, as cluster duplication is designed for simple cluster copying, _not_ wide scale configuration changes. 
Editing other values may invalidate the config file, which will lead to cluster deployment failure. ## Prerequisites -Download and install [Rancher CLI]({{< baseurl >}}/rancher/v2.x/en/cli). Remember to [create an API bearer token]({{< baseurl >}}/rancher/v2.x/en/user-settings/api-keys) if necessary. +Download and install [Rancher CLI]({{}}/rancher/v2.x/en/cli). Remember to [create an API bearer token]({{}}/rancher/v2.x/en/user-settings/api-keys) if necessary. ## 1. Export Cluster Config diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-access/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-access/_index.md index 973ba43dcce..1e530ae86cf 100644 --- a/content/rancher/v2.x/en/cluster-admin/cluster-access/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/cluster-access/_index.md @@ -17,18 +17,18 @@ There are many ways you can interact with Kubernetes clusters that are managed b Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. - For more information, see [Accessing Clusters with kubectl Shell]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-shell). + For more information, see [Accessing Clusters with kubectl Shell]({{}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-shell). - **Terminal remote connection** You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. - For more information, see [Accessing Clusters with kubectl and a kubeconfig File]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-and-a-kubeconfig-file). 
+ For more information, see [Accessing Clusters with kubectl and a kubeconfig File]({{}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-and-a-kubeconfig-file). - **Rancher CLI** - You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI]({{< baseurl >}}/rancher/v2.x/en/cli/). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. + You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI]({{}}/rancher/v2.x/en/cli/). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. - **Rancher API** - Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key]({{< baseurl >}}/rancher/v2.x/en/user-settings/api-keys/). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. \ No newline at end of file + Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key]({{}}/rancher/v2.x/en/user-settings/api-keys/). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/_index.md index 154fea58a24..0edd67b0730 100644 --- a/content/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/_index.md @@ -9,7 +9,7 @@ aliases: If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a cluster, assign the user a cluster membership. 
->**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members/) instead. +>**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members/) instead. There are two contexts where you can add cluster members: @@ -33,23 +33,23 @@ Cluster administrators can edit the membership for a cluster, controlling which If external authentication is configured: - - Rancher returns users from your [external authentication]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/) source as you type. + - Rancher returns users from your [external authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/) source as you type. >**Using AD but can't find your users?** - >There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/ad/). + >There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/). - A drop-down allows you to add groups instead of individual users. The drop-down only lists groups that you, the logged in user, are part of. - >**Note:** If you are logged in as a local user, external users do not display in your search results. For more information, see [External Authentication Configuration and Principal Users]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). + >**Note:** If you are logged in as a local user, external users do not display in your search results. 
For more information, see [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). 4. Assign the user or group **Cluster** roles. - [What are Cluster Roles?]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) + [What are Cluster Roles?]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) >**Tip:** For Custom Roles, you can modify the list of individual roles available for assignment. > - > - To add roles to the list, [Add a Custom Role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). - > - To remove roles from the list, [Lock/Unlock Roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/locked-roles). + > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). + > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/locked-roles). **Result:** The chosen users are added to the cluster. diff --git a/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md b/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md index 5c2cf122f0c..1b2d42d582a 100644 --- a/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md @@ -8,7 +8,7 @@ After you provision a Kubernetes cluster using Rancher, you can still edit optio To Edit an Existing Cluster ![Edit Cluster]({{}}/img/rancher/edit-cluster.png) -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. +The options and settings available for an existing cluster change based on the method that you used to provision it. 
For example, only clusters [provisioned by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. The following table summarizes the options and settings available for each cluster type: @@ -24,7 +24,7 @@ Cluster administrators can [edit the membership for a cluster,]({{}}/ra ## Cluster Options -When editing clusters, clusters that are [launched using RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) feature more options than clusters that are imported or hosted by a Kubernetes provider. The headings that follow document options available only for RKE clusters. +When editing clusters, clusters that are [launched using RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) feature more options than clusters that are imported or hosted by a Kubernetes provider. The headings that follow document options available only for RKE clusters. ### Updating ingress-nginx @@ -34,26 +34,26 @@ If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delet # Editing Other Cluster Options -In [clusters launched by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. +In [clusters launched by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. >**Note:** These options are not available for imported clusters or hosted Kubernetes clusters. Options for RKE Clusters -![Cluster Options]({{< baseurl >}}/img/rancher/cluster-options.png) +![Cluster Options]({{}}/img/rancher/cluster-options.png) Option | Description | ---------|----------| Kubernetes Version | The version of Kubernetes installed on each cluster node. For more detail, see [Upgrading Kubernetes]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes). 
| - Network Provider | The [container networking interface]({{< baseurl >}}/rancher/v2.x/en/faq/networking/#cni-providers) that powers networking for your cluster.

**Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. | + Network Provider | The [container networking interface]({{}}/rancher/v2.x/en/faq/networking/#cni-providers) that powers networking for your cluster.

**Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. | Project Network Isolation | As of Rancher v2.0.7, if you're using the Canal network provider, you can choose whether to enable or disable inter-project communication. | Nginx Ingress | If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use Nginx ingress within the cluster. | Metrics Server Monitoring | Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. | - Pod Security Policy Support | Enables [pod security policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. | - Docker version on nodes | Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a [supported Docker version]({{< baseurl >}}/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/), Rancher will stop pods from running on nodes that don't have a supported Docker version installed. | + Pod Security Policy Support | Enables [pod security policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. | + Docker version on nodes | Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a [supported Docker version]({{}}/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/), Rancher will stop pods from running on nodes that don't have a supported Docker version installed. 
| Docker Root Directory | The directory on your cluster nodes where you've installed Docker. If you install Docker on your nodes to a non-default directory, update this path. | Default Pod Security Policy | If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. | - Cloud Provider | If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. | + Cloud Provider | If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. |
# Editing Cluster as YAML @@ -67,6 +67,6 @@ Instead of using the Rancher UI to choose Kubernetes options for the cluster, ad In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. For more information, see the [cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) -![image]({{< baseurl >}}/img/rancher/cluster-options-yaml.png) +![image]({{}}/img/rancher/cluster-options-yaml.png) -For an example of RKE config file syntax, see the [RKE documentation]({{< baseurl >}}/rke/latest/en/example-yamls/). +For an example of RKE config file syntax, see the [RKE documentation]({{}}/rke/latest/en/example-yamls/). diff --git a/content/rancher/v2.x/en/cluster-admin/nodes/_index.md b/content/rancher/v2.x/en/cluster-admin/nodes/_index.md index 31c3e595bae..352a34c2242 100644 --- a/content/rancher/v2.x/en/cluster-admin/nodes/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/nodes/_index.md @@ -5,7 +5,7 @@ aliases: - /rancher/v2.x/en/k8s-in-rancher/nodes/ --- -After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) to provision the cluster, there are different node options available. +After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) to provision the cluster, there are different node options available. 
This page covers the following topics: @@ -20,11 +20,11 @@ This page covers the following topics: To manage individual nodes, browse to the cluster that you want to manage and then select **Nodes** from the main menu. You can open the options menu for a node by clicking its **Ellipsis** icon (**...**). ->**Note:** If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters). +>**Note:** If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters]({{}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters). # Node Options for Each Type of Cluster -The following table lists which node options are available for each [type of cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-options) in Rancher. Click the links in the **Option** column for more detailed information about each feature. +The following table lists which node options are available for each [type of cluster]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-options) in Rancher. Click the links in the **Option** column for more detailed information about each feature. | Option | [Nodes Hosted by an Infrastructure Provider][1] | [Custom Node][2] | [Hosted Cluster][3] | [Imported Nodes][4] | Description | | ------------------------------------------------ | ------------------------------------------------ | ---------------- | ------------------- | ------------------- | ------------------------------------------------------------------ | @@ -36,22 +36,22 @@ The following table lists which node options are available for each [type of clu | [Download Keys](#ssh-into-a-node-hosted-by-an-infrastructure-provider) | ✓ | | | | Download SSH key for in order to SSH into the node. | | [Node Scaling](#scaling-nodes) | ✓ | | | | Scale the number of nodes in the node pool up or down. 
| -[1]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/ +[1]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ +[2]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ +[3]: {{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ +[4]: {{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/ ### Notes for Node Pool Nodes -Clusters provisioned using [one of the node pool options]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) automatically maintain the node scale that's set during the initial cluster provisioning. This scale determines the number of active nodes that Rancher maintains for the cluster. +Clusters provisioned using [one of the node pool options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) automatically maintain the node scale that's set during the initial cluster provisioning. This scale determines the number of active nodes that Rancher maintains for the cluster. ### Notes for Nodes Provisioned by Hosted Kubernetes Providers -Options for managing nodes [hosted by a Kubernetes provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. +Options for managing nodes [hosted by a Kubernetes provider]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. 
### Notes for Imported Nodes -Although you can deploy workloads to an [imported cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. +Although you can deploy workloads to an [imported cluster]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. # Cordoning and Draining Nodes @@ -135,7 +135,7 @@ Editing a node lets you: # Viewing a Node API -Select this option to view the node's [API endpoints]({{< baseurl >}}/rancher/v2.x/en/api/). +Select this option to view the node's [API endpoints]({{}}/rancher/v2.x/en/api/). # Deleting a Node @@ -151,7 +151,7 @@ For nodes hosted by an infrastructure provider, you can scale the number of node # SSH into a Node Hosted by an Infrastructure Provider -For [nodes hosted by an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. +For [nodes hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. 1. From the cluster hosted by an infrastructure provider, select **Nodes** from the main menu. @@ -172,11 +172,11 @@ For [nodes hosted by an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en # Managing Node Pools -> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) The node pool features are not available for imported clusters or clusters hosted by a Kubernetes provider. 
+> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) The node pool features are not available for imported clusters or clusters hosted by a Kubernetes provider. -In clusters [launched by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), you can: +In clusters [launched by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), you can: -- Add new [pools of nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to your cluster. The nodes added to the pool are provisioned according to the [node template]({{< baseurl >}}/rancher/v2.x/en/user-settings/node-templates/) that you use. +- Add new [pools of nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to your cluster. The nodes added to the pool are provisioned according to the [node template]({{}}/rancher/v2.x/en/user-settings/node-templates/) that you use. - Click **+** and follow the directions on screen to create a new template. @@ -184,4 +184,4 @@ In clusters [launched by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioni - Redistribute Kubernetes roles amongst your node pools by making different checkbox selections -- Scale the number of nodes in a pool up or down (although, if you simply want to maintain your node scale, we recommend using the cluster's [Nodes tab]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/nodes/#nodes-provisioned-by-node-pool) instead.) +- Scale the number of nodes in a pool up or down (although, if you simply want to maintain your node scale, we recommend using the cluster's [Nodes tab]({{}}/rancher/v2.x/en/k8s-in-rancher/nodes/#nodes-provisioned-by-node-pool) instead.) 
diff --git a/content/rancher/v2.x/en/cluster-admin/pod-security-policy/_index.md b/content/rancher/v2.x/en/cluster-admin/pod-security-policy/_index.md index 11e415f5b3a..730255113e3 100644 --- a/content/rancher/v2.x/en/cluster-admin/pod-security-policy/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/pod-security-policy/_index.md @@ -3,9 +3,9 @@ title: Adding a Pod Security Policy weight: 80 --- -> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) +> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) -When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running. +When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running. You can assign a pod security policy when you provision a cluster. However, if you need to relax or restrict security for your pods later, you can update the policy while editing your cluster. @@ -15,11 +15,11 @@ You can assign a pod security policy when you provision a cluster. However, if y 3. From **Pod Security Policy Support**, select **Enabled**. - >**Note:** This option is only available for clusters [provisioned by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). 
+ >**Note:** This option is only available for clusters [provisioned by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). 4. From the **Default Pod Security Policy** drop-down, select the policy you want to apply to the cluster. - Rancher ships with [policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) as well. + Rancher ships with [policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) as well. 5. Click **Save**. diff --git a/content/rancher/v2.x/en/cluster-admin/restoring-etcd/_index.md b/content/rancher/v2.x/en/cluster-admin/restoring-etcd/_index.md index 0c70a823b0d..2194f5c43b1 100644 --- a/content/rancher/v2.x/en/cluster-admin/restoring-etcd/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/restoring-etcd/_index.md @@ -5,11 +5,11 @@ weight: 2050 _Available as of v2.2.0_ -etcd backup and recovery for [Rancher launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. +etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. 
The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. -Rancher recommends enabling the [ability to set up recurring snapshots of etcd]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots-for-the-cluster), but [one-time snapshots]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-your-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). +Rancher recommends enabling the [ability to set up recurring snapshots of etcd]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots-for-the-cluster), but [one-time snapshots]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-your-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). ->**Note:** If you have any Rancher launched Kubernetes clusters that were created prior to v2.2.0, after upgrading Rancher, you must [edit the cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the [updated snapshot features]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/). Even if you were already creating snapshots prior to v2.2.0, you must do this step as the older snapshots will not be available to use to back up and restore etcd through the UI. 
+>**Note:** If you have any Rancher launched Kubernetes clusters that were created prior to v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the [updated snapshot features]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/). Even if you were already creating snapshots prior to v2.2.0, you must do this step as the older snapshots will not be available to use to back up and restore etcd through the UI. ## Viewing Available Snapshots @@ -33,11 +33,11 @@ If your Kubernetes cluster is broken, you can restore the cluster from a snapsho **Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. -> **Note:** If you are restoring a cluster with unavailable etcd nodes, it's recommended that all etcd nodes are removed from Rancher before attempting to restore. For clusters that were provisioned using [nodes hosted in an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/), new etcd nodes will automatically be created. For [custom clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/), please ensure that you add new etcd nodes to the cluster. +> **Note:** If you are restoring a cluster with unavailable etcd nodes, it's recommended that all etcd nodes are removed from Rancher before attempting to restore. For clusters that were provisioned using [nodes hosted in an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/), new etcd nodes will automatically be created. For [custom clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/), please ensure that you add new etcd nodes to the cluster. 
## Recovering etcd without a Snapshot -If the group of etcd nodes loses quorum, the Kubernetes cluster will report a failure because no operations, e.g. deploying workloads, can be executed in the Kubernetes cluster. Please review the best practices for the what the [number of etcd nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/production/#count-of-etcd-nodes) should be in a Kubernetes cluster. If you want to recover your set of etcd nodes, follow these instructions: +If the group of etcd nodes loses quorum, the Kubernetes cluster will report a failure because no operations, e.g. deploying workloads, can be executed in the Kubernetes cluster. Please review the best practices for the what the [number of etcd nodes]({{}}/rancher/v2.x/en/cluster-provisioning/production/#count-of-etcd-nodes) should be in a Kubernetes cluster. If you want to recover your set of etcd nodes, follow these instructions: 1. Keep only one etcd node in the cluster by removing all other etcd nodes. @@ -63,4 +63,4 @@ If the group of etcd nodes loses quorum, the Kubernetes cluster will report a fa 5. Run the revised command. -6. After the single nodes is up and running, Rancher recommends adding additional etcd nodes to your cluster. If you have a [custom cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/) and you want to reuse an old node, you are required to [clean up the nodes]({{< baseurl >}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) before attempting to add them back into a cluster. +6. After the single nodes is up and running, Rancher recommends adding additional etcd nodes to your cluster. If you have a [custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/) and you want to reuse an old node, you are required to [clean up the nodes]({{}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) before attempting to add them back into a cluster. 
diff --git a/content/rancher/v2.x/en/cluster-admin/tools/alerts/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/alerts/_index.md index d8d19368108..0ba6ff6df0b 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/alerts/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/alerts/_index.md @@ -11,7 +11,7 @@ Before you can receive alerts, you must configure one or more notifier in Ranche When you create a cluster, some alert rules are predefined. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) for them. -For details about what triggers the predefined alerts, refer to the [documentation on default alerts.]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts) +For details about what triggers the predefined alerts, refer to the [documentation on default alerts.]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts) This section covers the following topics: @@ -25,8 +25,8 @@ This section covers the following topics: Some examples of alert events are: -- A Kubernetes [master component]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) entering an unhealthy state. -- A node or [workload]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/) error occurring. +- A Kubernetes [master component]({{}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) entering an unhealthy state. +- A node or [workload]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/) error occurring. - A scheduled deployment taking place as planned. - A node's hardware resources becoming overstressed. @@ -36,7 +36,7 @@ You can set an urgency level for each alert. This urgency appears in the notific # Scope of Alerts -The scope for alerts can be set at either the cluster level or [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/alerts/). 
+The scope for alerts can be set at either the cluster level or [project level]({{}}/rancher/v2.x/en/project-admin/tools/alerts/). At the cluster level, Rancher monitors components in your Kubernetes cluster, and sends you alerts related to: @@ -47,9 +47,9 @@ At the cluster level, Rancher monitors components in your Kubernetes cluster, an # Adding Cluster Alerts -As a [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send you alerts for cluster events. +As a [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send you alerts for cluster events. ->**Prerequisite:** Before you can receive cluster alerts, you must [add a notifier]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers). +>**Prerequisite:** Before you can receive cluster alerts, you must [add a notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers). 1. From the **Global** view, navigate to the cluster that you want to configure cluster alerts for. Select **Tools > Alerts**. Then click **Add Alert Group**. 
@@ -180,7 +180,7 @@ This alert type monitors for the overload from Prometheus expression querying, i - [**ETCD**](https://etcd.io/docs/v3.4.0/op-guide/monitoring/) - [**Kubernetes Components**](https://github.com/kubernetes/metrics) - [**Kubernetes Resources**](https://github.com/kubernetes/kube-state-metrics) - - [**Fluentd**](https://docs.fluentd.org/v1.0/articles/monitoring-prometheus) (supported by [Logging]({{< baseurl >}}/rancher/v2.x/en/tools/logging)) + - [**Fluentd**](https://docs.fluentd.org/v1.0/articles/monitoring-prometheus) (supported by [Logging]({{}}/rancher/v2.x/en/tools/logging)) - [**Cluster Level Grafana**](http://docs.grafana.org/administration/metrics/) - **Cluster Level Prometheus** @@ -218,7 +218,7 @@ This alert type monitors for the overload from Prometheus expression querying, i 1. Continue adding more **Alert Rule** to the group. -1. Finally, choose the [notifiers]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) to send the alerts to. +1. Finally, choose the [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) to send the alerts to. - You can set up multiple notifiers. - You can change notifier recipients on the fly. diff --git a/content/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts/_index.md index 13277b3fbc4..ea7f91ff0e0 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts/_index.md @@ -5,7 +5,7 @@ weight: 1 When you create a cluster, some alert rules are predefined. These alerts notify you about signs that the cluster could be unhealthy. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) for them. -Several of the alerts use Prometheus expressions as the metric that triggers the alert. 
For more information on how expressions work, you can refer to the Rancher [documentation about Prometheus expressions]({{< baseurl >}} +Several of the alerts use Prometheus expressions as the metric that triggers the alert. For more information on how expressions work, you can refer to the Rancher [documentation about Prometheus expressions]({{}} /rancher/v2.x/en/cluster-admin/tools/monitoring/expression/) or the Prometheus [documentation about querying metrics](https://prometheus.io/docs/prometheus/latest/querying/basics/). # Alerts for etcd diff --git a/content/rancher/v2.x/en/cluster-admin/tools/logging/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/logging/_index.md index b1431bf3750..07c80a651cf 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/logging/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/logging/_index.md @@ -55,8 +55,8 @@ Logging Driver: json-file You can configure logging at either cluster level or project level. -- Cluster logging writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. -- [Project logging]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/logging/) writes logs for every pod in that particular project. +- Cluster logging writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. +- [Project logging]({{}}/rancher/v2.x/en/project-admin/tools/logging/) writes logs for every pod in that particular project. 
Logs that are sent to your logging service are from the following locations: @@ -65,7 +65,7 @@ Logs that are sent to your logging service are from the following locations: # Enabling Cluster Logging -As an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send Kubernetes logs to a logging service. +As an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send Kubernetes logs to a logging service. 1. From the **Global** view, navigate to the cluster that you want to configure cluster logging. @@ -73,11 +73,11 @@ As an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global 1. Select a logging service and enter the configuration. Refer to the specific service for detailed configuration. Rancher supports integration with the following services: - - [Elasticsearch]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/elasticsearch/) - - [Splunk]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/splunk/) - - [Kafka]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/kafka/) - - [Syslog]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/syslog/) - - [Fluentd]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/fluentd/) + - [Elasticsearch]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/elasticsearch/) + - [Splunk]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/splunk/) + - [Kafka]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/kafka/) + - [Syslog]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/syslog/) + - [Fluentd]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/fluentd/) 1. 
(Optional) Instead of using the UI to configure the logging services, you can enter custom advanced configurations by clicking on **Edit as File**, which is located above the logging targets. This link is only visible after you select a logging service. diff --git a/content/rancher/v2.x/en/cluster-admin/tools/logging/splunk/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/logging/splunk/_index.md index 00002ac3c71..0d4edcf49ba 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/logging/splunk/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/logging/splunk/_index.md @@ -55,10 +55,10 @@ If your instance of Splunk uses SSL, your **Endpoint** will need to begin with ` 1. Click on **Search & Reporting**. The number of **Indexed Events** listed should be increasing. 1. Click on Data Summary and select the Sources tab. - ![View Logs]({{< baseurl >}}/img/rancher/splunk/splunk4.jpg) + ![View Logs]({{}}/img/rancher/splunk/splunk4.jpg) 1. To view the actual logs, click on the source that you declared earlier. - ![View Logs]({{< baseurl >}}/img/rancher/splunk/splunk5.jpg) + ![View Logs]({{}}/img/rancher/splunk/splunk5.jpg) ## Troubleshooting diff --git a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/_index.md index ede960e2578..9e9703a2d32 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/_index.md @@ -33,29 +33,29 @@ Multi-tenancy support in terms of cluster-only and project-only Prometheus insta # Monitoring Scope -Using Prometheus, you can monitor Rancher at both the cluster level and [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/monitoring/). For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. 
+Using Prometheus, you can monitor Rancher at both the cluster level and [project level]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/). For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. - Cluster monitoring allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. - - [Kubernetes control plane]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#kubernetes-components-metrics) - - [etcd database]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#etcd-metrics) - - [All nodes (including workers)]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#cluster-metrics) + - [Kubernetes control plane]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#kubernetes-components-metrics) + - [etcd database]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#etcd-metrics) + - [All nodes (including workers)]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#cluster-metrics) -- [Project monitoring]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/monitoring/) allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. +- [Project monitoring]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/) allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. # Enabling Cluster Monitoring -As an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. 
+As an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. 1. From the **Global** view, navigate to the cluster that you want to configure cluster monitoring. 1. Select **Tools > Monitoring** in the navigation bar. -1. Select **Enable** to show the [Prometheus configuration options]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/prometheus/). Review the [resource consumption recommendations](#resource-consumption) to ensure you have enough resources for Prometheus and on your worker nodes to enable monitoring. Enter in your desired configuration options. +1. Select **Enable** to show the [Prometheus configuration options]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/prometheus/). Review the [resource consumption recommendations](#resource-consumption) to ensure you have enough resources for Prometheus and on your worker nodes to enable monitoring. Enter in your desired configuration options. 1. Click **Save**. -**Result:** The Prometheus server will be deployed as well as two monitoring applications. The two monitoring applications, `cluster-monitoring` and `monitoring-operator`, are added as an [application]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/) to the cluster's `system` project. After the applications are `active`, you can start viewing [cluster metrics]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/) through the [Rancher dashboard]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/#rancher-dashboard) or directly from [Grafana]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#grafana). +**Result:** The Prometheus server will be deployed as well as two monitoring applications. 
The two monitoring applications, `cluster-monitoring` and `monitoring-operator`, are added as an [application]({{}}/rancher/v2.x/en/catalog/apps/) to the cluster's `system` project. After the applications are `active`, you can start viewing [cluster metrics]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/) through the [Rancher dashboard]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/#rancher-dashboard) or directly from [Grafana]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#grafana). # Resource Consumption diff --git a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md index 14c797848cf..61c20f040c0 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md @@ -35,11 +35,11 @@ Some of the biggest metrics to look out for: 1. Click on **Node Metrics**. -[_Get expressions for Cluster Metrics_]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#cluster-metrics) +[_Get expressions for Cluster Metrics_]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#cluster-metrics) ### Etcd Metrics ->**Note:** Only supported for [Rancher launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). +>**Note:** Only supported for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). Etcd metrics display the operations of the etcd database on each of your cluster nodes. After establishing a baseline of normal etcd operational metrics, observe them for abnormal deltas between metric refreshes, which indicate potential issues with etcd. Always address etcd issues immediately! 
@@ -55,13 +55,13 @@ Some of the biggest metrics to look out for: If this statistic suddenly grows, it usually indicates network communication issues that constantly force the cluster to elect a new leader. -[_Get expressions for Etcd Metrics_]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#etcd-metrics) +[_Get expressions for Etcd Metrics_]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#etcd-metrics) ### Kubernetes Components Metrics Kubernetes components metrics display data about the cluster's individual Kubernetes components. Primarily, it displays information about connections and latency for each component: the API server, controller manager, scheduler, and ingress controller. ->**Note:** The metrics for the controller manager, scheduler and ingress controller are only supported for [Rancher launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). +>**Note:** The metrics for the controller manager, scheduler and ingress controller are only supported for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). When analyzing Kubernetes component metrics, don't be concerned about any single standalone metric in the charts and graphs that display. Rather, you should establish a baseline for metrics considered normal following a period of observation, e.g. the range of values that your components usually operate within and are considered normal. After you establish this baseline, be on the lookout for large deltas in the charts and graphs, as these big changes usually indicate a problem that you need to investigate. @@ -87,13 +87,13 @@ Some of the more important component metrics to monitor are: How fast ingress is routing connections to your cluster services. 
-[_Get expressions for Kubernetes Component Metrics_]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#kubernetes-components-metrics) +[_Get expressions for Kubernetes Component Metrics_]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#kubernetes-components-metrics) ## Rancher Logging Metrics -Although the Dashboard for a cluster primarily displays data sourced from Prometheus, it also displays information for cluster logging, provided that you have [configured Rancher to use a logging service]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/). +Although the Dashboard for a cluster primarily displays data sourced from Prometheus, it also displays information for cluster logging, provided that you have [configured Rancher to use a logging service]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/). -[_Get expressions for Rancher Logging Metrics_]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#rancher-logging-metrics) +[_Get expressions for Rancher Logging Metrics_]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#rancher-logging-metrics) ## Finding Workload Metrics @@ -110,4 +110,4 @@ Workload metrics display the hardware utilization for a Kubernetes workload. You - **View the Pod Metrics:** Click on **Pod Metrics**. - **View the Container Metrics:** In the **Containers** section, select a specific container and click on its name. Click on **Container Metrics**. 
-[_Get expressions for Workload Metrics_]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#workload-metrics) +[_Get expressions for Workload Metrics_]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#workload-metrics) diff --git a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/prometheus/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/prometheus/_index.md index 0f667bcd1a6..c5cadbc83aa 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/prometheus/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/prometheus/_index.md @@ -6,7 +6,7 @@ weight: 1 _Available as of v2.2.0_ -While configuring monitoring at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring) or [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), there are multiple options that can be configured. +While configuring monitoring at either the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring) or [project level]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), there are multiple options that can be configured. Option | Description -------|------------- @@ -20,7 +20,7 @@ Prometheus [CPU Reservation](https://kubernetes.io/docs/concepts/configuration/m Prometheus [Memory Limit](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) | Memory resource limit for the Prometheus pod. Prometheus [Memory Reservation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) | Memory resource requests for the Prometheus pod. Selector | Ability to select the nodes in which Prometheus and Grafana pods are deployed to. To use this option, the nodes must have labels. 
-Advanced Options | Since monitoring is an [application](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) from the [Rancher catalog]({{< baseurl >}}/rancher/v2.x/en/catalog/), it can be [configured like other catalog application]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/#configuration-options). _Warning: Any modification to the application without understanding the entire application can lead to catastrophic errors._ +Advanced Options | Since monitoring is an [application](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) from the [Rancher catalog]({{}}/rancher/v2.x/en/catalog/), it can be [configured like other catalog application]({{}}/rancher/v2.x/en/catalog/apps/#configuration-options). _Warning: Any modification to the application without understanding the entire application can lead to catastrophic errors._ ## Node Exporter @@ -32,8 +32,8 @@ When configuring Prometheus and enabling the node exporter, enter a host port in ## Persistent Storage ->**Prerequisite:** Configure one or more [storage class]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/#adding-storage-classes) to use as [persistent storage]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) for your Prometheus or Grafana pod. +>**Prerequisite:** Configure one or more [storage class]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/#adding-storage-classes) to use as [persistent storage]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) for your Prometheus or Grafana pod. By default, when you enable Prometheus for either a cluster or project, all monitoring data that Prometheus collects is stored on its own pod. With local storage, if the Prometheus or Grafana pods fail, all the data is lost. Rancher recommends configuring an external persistent storage to the cluster. 
With the external persistent storage, if the Prometheus or Grafana pods fail, the new pods can recover using data from the persistent storage. -When enabling persistent storage for Prometheus or Grafana, specify the size of the persistent volume and select the [storage class]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/#storage-classes). +When enabling persistent storage for Prometheus or Grafana, specify the size of the persistent volume and select the [storage class]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/#storage-classes). diff --git a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md index 28ccf295c9b..a1dd3946219 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md @@ -5,11 +5,11 @@ weight: 2 _Available as of v2.2.0_ -After you've enabled monitoring at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring) or [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), you will want to be start viewing the data being collected. There are multiple ways to view this data. +After you've enabled monitoring at either the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring) or [project level]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), you will want to start viewing the data being collected. There are multiple ways to view this data. ## Rancher Dashboard ->**Note:** This is only available if you've enabled monitoring at the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring). 
Project specific analytics must be viewed using the project's Grafana instance. +>**Note:** This is only available if you've enabled monitoring at the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring). Project specific analytics must be viewed using the project's Grafana instance. Rancher's dashboards are available at multiple locations: @@ -33,13 +33,13 @@ When analyzing these metrics, don't be concerned about any single standalone met ## Grafana -If you've enabled monitoring at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring) or [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), Rancher automatically creates a link to Grafana instance. Use this link to view monitoring data. +If you've enabled monitoring at either the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring) or [project level]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), Rancher automatically creates a link to Grafana instance. Use this link to view monitoring data. Grafana allows you to query, visualize, alert, and ultimately, understand your cluster and workload data. For more information on Grafana and its capabilities, visit the [Grafana website](https://grafana.com/grafana). ### Authentication -Rancher determines which users can access the new Grafana instance, as well as the objects they can view within it, by validating them against the user's [cluster or project roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/). In other words, a user's access in Grafana mirrors their access in Rancher. 
+Rancher determines which users can access the new Grafana instance, as well as the objects they can view within it, by validating them against the user's [cluster or project roles]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/). In other words, a user's access in Grafana mirrors their access in Rancher. When you go to the Grafana instance, you will be logged in with the username `admin` and the password `admin`. If you log out and log in again, you will be prompted to change your password. You will only have access to the URL of the Grafana instance if you have access to view the corresponding metrics in Rancher. So for example, if your Rancher permissions are scoped to the project level, you won't be able to see the Grafana instance for cluster-level metrics. diff --git a/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md index 107184659f6..1fc9c115451 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md @@ -74,8 +74,8 @@ _Available as of v2.2.0_ After creating a notifier, set up alerts to receive notifications of Rancher system events. -- [Cluster owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can set up alerts at the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/alerts/). -- [Project owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can set up alerts at the [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/alerts/). +- [Cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can set up alerts at the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/). 
+- [Project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can set up alerts at the [project level]({{}}/rancher/v2.x/en/project-admin/tools/alerts/). ## Managing Notifiers diff --git a/content/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/_index.md b/content/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/_index.md index 2d7f62ea392..513d31f1fab 100644 --- a/content/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/_index.md @@ -3,7 +3,7 @@ title: Upgrading Kubernetes weight: 70 --- -> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) +> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) Following an upgrade to the latest version of Rancher, you can update your existing clusters to use the latest supported version of Kubernetes. @@ -11,7 +11,7 @@ Before a new version of Rancher is released, it's tested with the latest minor v As of Rancher v2.3.0, the Kubernetes metadata feature was added, which allows Rancher to ship Kubernetes patch versions without upgrading Rancher. For details, refer to the [section on Kubernetes metadata.]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata) ->**Recommended:** Before upgrading Kubernetes, [backup your cluster]({{< baseurl >}}/rancher/v2.x/en/backups). +>**Recommended:** Before upgrading Kubernetes, [backup your cluster]({{}}/rancher/v2.x/en/backups). 1. From the **Global** view, find the cluster for which you want to upgrade Kubernetes. Select **Vertical Ellipsis (...) > Edit**. 
diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md index bd1debc8674..8cd6adf86e2 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md @@ -16,7 +16,7 @@ To set up storage, follow these steps: ### Prerequisites -- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) +- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) - If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. ### 1. Set up persistent storage in an infrastructure provider diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/_index.md index 895e45a11ef..2fc9d2799df 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/_index.md @@ -10,5 +10,5 @@ Rancher supports persistent storage with a variety of volume plugins. 
However, b For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: -- [NFS]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/nfs/) -- [vSphere]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/vsphere/) +- [NFS]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/nfs/) +- [vSphere]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/vsphere/) diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md index c91713c4bb0..a9be8884a31 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md @@ -65,4 +65,4 @@ Before you can use the NFS storage volume plug-in with Rancher deployments, you ## What's Next? -Within Rancher, add the NFS server as a [storage volume]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#adding-a-persistent-volume) and/or [storage class]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#adding-storage-classes). After adding the server, you can use it for storage for your deployments. +Within Rancher, add the NFS server as a [storage volume]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#adding-a-persistent-volume) and/or [storage class]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#adding-storage-classes). After adding the server, you can use it for storage for your deployments. 
diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md index 8fcc55db032..0750143fe22 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md @@ -5,11 +5,11 @@ aliases: - /rancher/v2.x/en/tasks/clusters/adding-storage/provisioning-storage/vsphere/ --- -To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume [storage class]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#storage-classes). This practice dynamically provisions vSphere storage when workloads request volumes through a [persistent volume claim]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/). +To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume [storage class]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#storage-classes). This practice dynamically provisions vSphere storage when workloads request volumes through a [persistent volume claim]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/). ### Prerequisites -In order to provision vSphere volumes in a cluster created with the [Rancher Kubernetes Engine (RKE)]({{< baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the [vSphere cloud provider]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/vsphere) must be explicitly enabled in the [cluster options]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/). 
+In order to provision vSphere volumes in a cluster created with the [Rancher Kubernetes Engine (RKE)]({{<baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the [vSphere cloud provider]({{}}/rke/latest/en/config-options/cloud-providers/vsphere) must be explicitly enabled in the [cluster options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/). ### Creating A Storage Class @@ -29,7 +29,7 @@ In order to provision vSphere volumes in a cluster created with the [Rancher Kub ### Creating a Workload with a vSphere Volume -1. From the cluster where you configured vSphere storage, begin creating a workload as you would in [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). +1. From the cluster where you configured vSphere storage, begin creating a workload as you would in [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). 2. For **Workload Type**, select **Stateful set of 1 pod**. 3. Expand the **Volumes** section and click **Add Volume**. 4. Choose **Add a new persistent volume (claim)**. This option will implicitly create the claim once you deploy the workload. @@ -54,7 +54,7 @@ In order to provision vSphere volumes in a cluster created with the [Rancher Kub 9. Once the replacement pod is running, click **Execute Shell**. 10. Inspect the contents of the directory where the volume is mounted by entering `ls -l /`. Note that the file you created earlier is still present. 
- ![workload-persistent-data]({{< baseurl >}}/img/rancher/workload-persistent-data.png) + ![workload-persistent-data]({{}}/img/rancher/workload-persistent-data.png) ## Why to Use StatefulSets Instead of Deployments diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md index a67c767cadd..a2565bd2b5b 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md @@ -16,7 +16,7 @@ To use an existing PV, your application will need to use a PVC that is bound to For dynamic storage provisioning, your application will need to use a PVC that is bound to a storage class. The storage class contains the authorization to provision new persistent volumes. -![Setting Up New and Existing Persistent Storage]({{< baseurl >}}/img/rancher/rancher-storage.svg) +![Setting Up New and Existing Persistent Storage]({{}}/img/rancher/rancher-storage.svg) For more information, refer to the [official Kubernetes documentation on storage](https://kubernetes.io/docs/concepts/storage/volumes/) diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md index 0672bbbf6ee..049a654217d 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md @@ -3,7 +3,7 @@ title: iSCSI Volumes weight: 6000 --- -In [Rancher Launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. 
This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. +In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md index 05ecaf4f436..9aaffd5accc 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md @@ -66,7 +66,7 @@ These steps describe how to set up a PVC in the namespace where your stateful wo 1. Enter a **Name** for the volume claim. -1. Select the [Namespace]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) of the volume claim. +1. Select the [Namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) of the volume claim. 1. 
In the **Source** field, click **Use a Storage Class to provision a new persistent volume.** diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md index 8a5fc2495de..e2da323ee7a 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md @@ -24,9 +24,9 @@ Kubernetes Providers | Available as of | When using Rancher to create a cluster hosted by a provider, you are prompted for authentication information. This information is required to access the provider's API. For more information on how to obtain this information, see the following procedures: -- [Creating a GKE Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke) -- [Creating an EKS Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks) -- [Creating an AKS Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks) -- [Creating an ACK Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack) -- [Creating a TKE Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke) -- [Creating a CCE Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce) +- [Creating a GKE Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke) +- [Creating an EKS Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks) +- [Creating an AKS Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks) +- [Creating an ACK Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack) +- [Creating a TKE Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke) +- 
[Creating a CCE Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce) diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md index cb3951e4e68..32d75c76a00 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md @@ -6,7 +6,7 @@ weight: 2120 _Available as of v2.2.0_ -You can use Rancher to create a cluster hosted in Alibaba Cloud Kubernetes (ACK). Rancher has already implemented and packaged the [cluster driver]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. +You can use Rancher to create a cluster hosted in Alibaba Cloud Kubernetes (ACK). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. 
## Prerequisites diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md index 39bb5c1c44b..f01af1c27b3 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md @@ -6,7 +6,7 @@ weight: 2130 _Available as of v2.2.0_ -You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for CCE, but by default, this cluster driver is `inactive`. In order to launch CCE clusters, you will need to [enable the CCE cluster driver]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning CCE clusters. +You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for CCE, but by default, this cluster driver is `inactive`. In order to launch CCE clusters, you will need to [enable the CCE cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning CCE clusters. 
## Prerequisites in Huawei diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md index d3a3af145b5..e93fef1472a 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md @@ -38,7 +38,7 @@ For more detailed information on IAM policies for EKS, refer to the official [do The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by EKS. -![Rancher architecture with EKS hosted cluster]({{< baseurl >}}/img/rancher/rancher-architecture.svg) +![Rancher architecture with EKS hosted cluster]({{}}/img/rancher/rancher-architecture.svg) ## Create the EKS Cluster diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md index c3f8087e741..dc6c66b9efb 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md @@ -6,7 +6,7 @@ weight: 2125 _Available as of v2.2.0_ -You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). Rancher has already implemented and packaged the [cluster driver]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. 
+You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. ## Prerequisites in Tencent diff --git a/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md b/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md index da12ee46111..3722a97e451 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md @@ -7,7 +7,7 @@ This section describes the roles for etcd nodes, controlplane nodes, and worker This diagram is applicable to Kubernetes clusters [launched with Rancher using RKE.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). -![Cluster diagram]({{< baseurl >}}/img/rancher/clusterdiagram.svg)
+![Cluster diagram]({{<baseurl>}}/img/rancher/clusterdiagram.svg)
Lines show the traffic flow between components. Colors are used purely for visual aid # etcd diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md index 9835c53a18c..c964b58e162 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md @@ -53,18 +53,18 @@ Provision the host according to the [installation requirements]({{}}/ra >**Using Windows nodes as Kubernetes workers?** > - >- See [Enable the Windows Support Option]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#enable-the-windows-support-option). - >- The only Network Provider available for clusters with Windows support is Flannel. See [Networking Option]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#networking-option). + >- See [Enable the Windows Support Option]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#enable-the-windows-support-option). + >- The only Network Provider available for clusters with Windows support is Flannel. See [Networking Option]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#networking-option). 6.
Click **Next**. 7. From **Node Role**, choose the roles that you want filled by a cluster node. >**Notes:** > - >- Using Windows nodes as Kubernetes workers? See [Node Configuration]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#node-configuration). + >- Using Windows nodes as Kubernetes workers? See [Node Configuration]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#node-configuration). >- Bare-Metal Server Reminder: If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers). -8. **Optional**: Click **[Show advanced options]({{< baseurl >}}/rancher/v2.x/en/admin-settings/agent-options/)** to specify IP address(es) to use when registering the node, override the hostname of the node, or to add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node. +8. **Optional**: Click **[Show advanced options]({{}}/rancher/v2.x/en/admin-settings/agent-options/)** to specify IP address(es) to use when registering the node, override the hostname of the node, or to add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node. 9. Copy the command displayed on screen to your clipboard. 
diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/_index.md index 0d7a0e5ab67..7d0c6c6e8e4 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/_index.md @@ -46,7 +46,7 @@ Since taints can be added at a node template and node pool, if there is no confl Using Rancher, you can create pools of nodes based on a [node template](#node-templates). The benefit of using a node pool is that if a node is destroyed or deleted, you can increase the number of live nodes to compensate for the node that was lost. The node pool helps you ensure that the count of the node pool is as expected. -Each node pool is assigned with a [node component]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) to specify how these nodes should be configured for the Kubernetes cluster. +Each node pool is assigned with a [node component]({{}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) to specify how these nodes should be configured for the Kubernetes cluster. ### Node Pool Taints @@ -112,9 +112,9 @@ Node templates can use cloud credentials to store credentials for launching node - Multiple node templates can share the same cloud credential to create node pools. If your key is compromised or expired, the cloud credential can be updated in a single place, which allows all node templates that are using it to be updated at once. -> **Note:** As of v2.2.0, the default `active` [node drivers]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use cloud credentials. 
If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. +> **Note:** As of v2.2.0, the default `active` [node drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use cloud credentials. If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. -After cloud credentials are created, the user can start [managing the cloud credentials that they created]({{< baseurl >}}/rancher/v2.x/en/user-settings/cloud-credentials/). +After cloud credentials are created, the user can start [managing the cloud credentials that they created]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/). # Node Drivers diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md index 94a58722719..4754951afde 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md @@ -11,7 +11,7 @@ Use Rancher to create a Kubernetes cluster in Amazon EC2. - **AWS EC2 Access Key and Secret Key** that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. - **IAM Policy created** to add to the user of the Access Key And Secret Key. 
See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our three example JSON policies below: - [Example IAM Policy](#example-iam-policy) - - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) + - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) - **IAM Policy added as Permission** to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. @@ -99,7 +99,7 @@ Optional: In the **Engine Options** section of the node template, you can config - **Security Groups** creates or configures the Security Groups applied to your nodes. Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#security-group-for-nodes-on-aws-ec2) to see what rules are created in the `rancher-nodes` Security Group. - **Instance** configures the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI.

- If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. + If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. 1. {{< step_rancher-template >}} 1. Click **Create**. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md index ecee7787ccc..ce0cfc1ac5f 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md @@ -162,7 +162,7 @@ Only VMs booting from RancherOS ISO are supported. Ensure that the [OS ISO URL](#instance-options) contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. 
- ![image]({{< baseurl >}}/img/rancher/vsphere-node-template-1.png) + ![image]({{}}/img/rancher/vsphere-node-template-1.png) {{% /tab %}} {{% /tabs %}} diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/_index.md index cdc3d70e232..4b6dfcf0339 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/_index.md @@ -48,7 +48,7 @@ The options for creating and configuring an instance are different depending on | Creation method | * | The method for setting up an operating system on the node. The operating system can be installed from an ISO or from a VM template. Depending on the creation method, you will also have to specify a VM template, content library, existing VM, or ISO. For more information on creation methods, refer to the section on [configuring instances.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#c-configure-instances-and-operating-systems) | | Cloud Init | | URL of a `cloud-config.yml` file or URL to provision VMs with. This file allows further customization of the operating system, such as network configuration, DNS servers, or system daemons. The operating system must support `cloud-init`. | | Networks | | Name(s) of the network to attach the VM to. | -| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. 
Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | +| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | {{% /tab %}} {{% tab "Rancher prior to v2.3.3" %}} @@ -58,9 +58,9 @@ The options for creating and configuring an instance are different depending on | CPUs | * | Number of vCPUS to assign to VMs. | | Memory | * | Amount of memory to assign to VMs. | | Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| +| Cloud Init | | URL of a [RancherOS cloud-config]({{}}/os/v1.x/en/installation/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| | OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. 
Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | +| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | {{% /tab %}} {{% /tabs %}} diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md index bd6c563029c..20922c7d3b2 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md @@ -36,7 +36,7 @@ This section is a cluster configuration reference, covering the following topics # Rancher UI Options -When creating a cluster using one of the options described in [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), you can configure basic Kubernetes options using the **Cluster Options** section. +When creating a cluster using one of the options described in [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), you can configure basic Kubernetes options using the **Cluster Options** section. ### Kubernetes Version @@ -44,7 +44,7 @@ The version of Kubernetes installed on your cluster nodes. Rancher packages its ### Network Provider -The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. 
For more details on the different networking providers, please view our [Networking FAQ]({{< baseurl >}}/rancher/v2.x/en/faq/networking/cni-providers/). +The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ]({{}}/rancher/v2.x/en/faq/networking/cni-providers/). >**Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you tear down the entire cluster and all its applications. @@ -57,9 +57,9 @@ Out of the box, Rancher is compatible with the following network providers: **Notes on Canal:** -In v2.0.0 - v2.0.4 and v2.0.6, this was the default option for these clusters was Canal with network isolation. With the network isolation automatically enabled, it prevented any pod communication between [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). +In v2.0.0 - v2.0.4 and v2.0.6, this was the default option for these clusters was Canal with network isolation. With the network isolation automatically enabled, it prevented any pod communication between [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). -As of v2.0.7, if you use Canal, you also have the option of using **Project Network Isolation**, which will enable or disable communication between pods in different [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). +As of v2.0.7, if you use Canal, you also have the option of using **Project Network Isolation**, which will enable or disable communication between pods in different [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). 
>**Attention Rancher v2.0.0 - v2.0.6 Users** > @@ -72,13 +72,13 @@ In v2.0.5, this was the default option, which did not prevent any network isolat **Notes on Weave:** -When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) and the [Weave Network Plug-in Options]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). +When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) and the [Weave Network Plug-in Options]({{}}/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). ### Kubernetes Cloud Providers You can configure a [Kubernetes cloud provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers). If you want to use [volumes and storage]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. ->**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. 
+>**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation]({{}}/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. If you want to see all the configuration options for a cluster, please click **Show advanced options** on the bottom right. The advanced options are described below: @@ -119,7 +119,7 @@ The following options are available when you create clusters in the Rancher UI. ### NGINX Ingress -Option to enable or disable the [NGINX ingress controller]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/). +Option to enable or disable the [NGINX ingress controller]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/). ### Node Port Range @@ -127,15 +127,15 @@ Option to change the range of ports that can be used for [NodePort services](htt ### Metrics Server Monitoring -Option to enable or disable [Metrics Server]({{< baseurl >}}/rke/latest/en/config-options/add-ons/metrics-server/). +Option to enable or disable [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/). ### Pod Security Policy Support -Option to enable and select a default [Pod Security Policy]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies). You must have an existing Pod Security Policy configured before you can use this option. +Option to enable and select a default [Pod Security Policy]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies). You must have an existing Pod Security Policy configured before you can use this option. ### Docker Version on Nodes -Option to require [a supported Docker version]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/) installed on the cluster nodes that are added to the cluster, or to allow unsupported Docker versions installed on the cluster nodes. 
+Option to require [a supported Docker version]({{}}/rancher/v2.x/en/installation/requirements/) installed on the cluster nodes that are added to the cluster, or to allow unsupported Docker versions installed on the cluster nodes. ### Docker Root Directory @@ -143,7 +143,7 @@ If the nodes you are adding to the cluster have Docker configured with a non-def ### Recurring etcd Snapshots -Option to enable or disable [recurring etcd snapshots]({{< baseurl >}}/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). +Option to enable or disable [recurring etcd snapshots]({{}}/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). # Cluster Config File @@ -154,7 +154,7 @@ Instead of using the Rancher UI to choose Kubernetes options for the cluster, ad - To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. - To read from an existing RKE file, click **Read from a file**. -![image]({{< baseurl >}}/img/rancher/cluster-options-yaml.png) +![image]({{}}/img/rancher/cluster-options-yaml.png) The structure of the config file is different depending on your version of Rancher. Below are example config files for Rancher v2.0.0-v2.2.x and for Rancher v2.3.0+. @@ -341,7 +341,7 @@ ssh_agent_auth: false ### Default DNS provider -The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider]({{< baseurl >}}/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. +The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider]({{}}/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. 
| Rancher version | Kubernetes version | Default DNS provider | |-------------|--------------------|----------------------| @@ -361,7 +361,7 @@ See [Docker Root Directory](#docker-root-directory). ### enable_cluster_monitoring -Option to enable or disable [Cluster Monitoring]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/). +Option to enable or disable [Cluster Monitoring]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/). ### enable_network_policy diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md index c9680df8478..009fca03abb 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md @@ -10,7 +10,7 @@ _Pod Security Policies_ are objects that control security-sensitive aspects of p When you create a new cluster with RKE, you can configure it to apply a PSP immediately. As you create the cluster, use the **Cluster Options** to enable a PSP. The PSP assigned to the cluster will be the default PSP for projects within the cluster. >**Prerequisite:** ->Create a Pod Security Policy within Rancher. Before you can assign a default PSP to a new cluster, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/). +>Create a Pod Security Policy within Rancher. Before you can assign a default PSP to a new cluster, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/). >**Note:** >For security purposes, we recommend assigning a PSP as you create your clusters. 
diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md index de3e9ba5058..0c5c967613c 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md @@ -12,11 +12,11 @@ For a conceptual overview of how the Rancher server provisions clusters and comm ### cattle-cluster-agent -The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource. +The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource. ### cattle-node-agent -The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters when `cattle-cluster-agent` is unavailable. +The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. 
The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters when `cattle-cluster-agent` is unavailable. > **Note:** In Rancher v2.2.4 and lower, the `cattle-node-agent` pods did not tolerate all taints, causing Kubernetes upgrades to fail on these nodes. The fix for this has been included in Rancher v2.2.5 and higher. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md index 337a4452bcc..f6bec63c232 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md @@ -5,7 +5,7 @@ weight: 2240 _Available as of v2.3.0_ -When provisioning a [custom cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes custom cluster on your existing infrastructure. +When provisioning a [custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes custom cluster on your existing infrastructure. You can use a mix of Linux and Windows hosts as your cluster nodes. Windows nodes can only be used for deploying workloads, while Linux nodes are required for cluster management. @@ -32,13 +32,13 @@ This guide covers the following topics: # Prerequisites -Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. 
If you have not already installed Rancher, please refer to the [installation documentation]({{< baseurl >}}/rancher/v2.x/en/installation/) before proceeding with this guide. +Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation]({{}}/rancher/v2.x/en/installation/) before proceeding with this guide. > **Note on Cloud Providers:** If you set a Kubernetes cloud provider in your cluster, some additional steps are required. You might want to set a cloud provider if you want to want to leverage a cloud provider's capabilities, for example, to automatically provision storage, load balancers, or other infrastructure for your cluster. Refer to [this page]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) for details on how to configure a cloud provider cluster of nodes that meet the prerequisites. # Requirements for Windows Clusters -For a custom cluster, the general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/). +For a custom cluster, the general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation]({{}}/rancher/v2.x/en/installation/requirements/). 
### OS and Docker Requirements @@ -84,9 +84,9 @@ We recommend the minimum three-node architecture listed in the table below, but | Node | Operating System | Kubernetes Cluster Role(s) | Purpose | | ------ | --------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| Node 1 | Linux (Ubuntu Server 18.04 recommended) | [Control Plane]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#etcd-nodes), [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Manage the Kubernetes cluster | -| Node 2 | Linux (Ubuntu Server 18.04 recommended) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster | -| Node 3 | Windows (Windows Server core version 1809 or above) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Run your Windows containers | +| Node 1 | Linux (Ubuntu Server 18.04 recommended) | [Control Plane]({{}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{}}/rancher/v2.x/en/cluster-provisioning/#etcd-nodes), [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Manage the Kubernetes cluster | +| Node 2 | Linux (Ubuntu Server 18.04 recommended) | [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster | +| Node 3 | Windows (Windows Server core version 1809 or above) | [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Run your Windows containers | ### 
Container Requirements @@ -130,11 +130,11 @@ You will provision three nodes: | Node 2 | Linux (Ubuntu Server 18.04 recommended) | | Node 3 | Windows (Windows Server core version 1809 or above required) | -If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) +If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) # 2. Create the Custom Cluster -The instructions for creating a custom cluster that supports Windows nodes are very similar to the general [instructions for creating a custom cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#2-create-the-custom-cluster) with some Windows-specific requirements. +The instructions for creating a custom cluster that supports Windows nodes are very similar to the general [instructions for creating a custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#2-create-the-custom-cluster) with some Windows-specific requirements. Windows support only be enabled if the cluster uses Kubernetes v1.15+ and the Flannel network provider. @@ -170,7 +170,7 @@ In this section, we fill out a form on the Rancher UI to get a custom command to 1. In the **Node Role** section, choose at least **etcd** and **Control Plane**. We recommend selecting all three. -1. 
Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent]({{< baseurl >}}/rancher/v2.x/en/admin-settings/agent-options/) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) +1. Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent]({{}}/rancher/v2.x/en/admin-settings/agent-options/) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) 1. Copy the command displayed on the screen to your clipboard. @@ -239,11 +239,11 @@ After creating your cluster, you can access it through the Rancher UI. As a best # Configuration for Storage Classes in Azure -If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a [storage class]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/#adding-storage-classes) for the cluster. +If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a [storage class]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/#adding-storage-classes) for the cluster. In order to have the Azure platform create the required storage resources, follow these steps: -1. [Configure the Azure cloud provider.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/#azure) +1. [Configure the Azure cloud provider.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/#azure) 1. Configure `kubectl` to connect to your cluster. 
diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md index e9986f6abae..988427179b4 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md @@ -5,9 +5,9 @@ weight: 9100 _Available from v2.1.0 to v2.1.9 and v2.2.0 to v2.2.3_ -This section describes how to provision Windows clusters in Rancher v2.1.x and v2.2.x. If you are using Rancher v2.3.0 or later, please refer to the new documentation for [v2.3.0 or later]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/). +This section describes how to provision Windows clusters in Rancher v2.1.x and v2.2.x. If you are using Rancher v2.3.0 or later, please refer to the new documentation for [v2.3.0 or later]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/). -When you create a [custom cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/), Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes cluster on your existing infrastructure. +When you create a [custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/), Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes cluster on your existing infrastructure. You can provision a custom Windows cluster using Rancher by using a mix of Linux and Windows hosts as your cluster nodes. @@ -43,23 +43,23 @@ When setting up a custom cluster with support for Windows nodes and containers, ## 1. Provision Hosts -To begin provisioning a custom cluster with Windows support, prepare your host servers. 
Provision three nodes according to our [requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/)—two Linux, one Windows. Your hosts can be: +To begin provisioning a custom cluster with Windows support, prepare your host servers. Provision three nodes according to our [requirements]({{}}/rancher/v2.x/en/installation/requirements/)—two Linux, one Windows. Your hosts can be: - Cloud-hosted VMs - VMs from virtualization clusters - Bare-metal servers -The table below lists the [Kubernetes roles]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) you'll assign to each host, although you won't enable these roles until further along in the configuration process—we're just informing you of each node's purpose. The first node, a Linux host, is primarily responsible for managing the Kubernetes control plane, although, in this use case, we're installing all three roles on this node. Node 2 is also a Linux worker, which is responsible for Ingress support. Finally, the third node is your Windows worker, which will run your Windows applications. +The table below lists the [Kubernetes roles]({{}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) you'll assign to each host, although you won't enable these roles until further along in the configuration process—we're just informing you of each node's purpose. The first node, a Linux host, is primarily responsible for managing the Kubernetes control plane, although, in this use case, we're installing all three roles on this node. Node 2 is also a Linux worker, which is responsible for Ingress support. Finally, the third node is your Windows worker, which will run your Windows applications. 
Node | Operating System | Future Cluster Role(s) --------|------------------|------ -Node 1 | Linux (Ubuntu Server 16.04 recommended) | [Control Plane]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#etcd), [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) -Node 2 | Linux (Ubuntu Server 16.04 recommended) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) (This node is used for Ingress support) -Node 3 | Windows (Windows Server core version 1809 or above) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) +Node 1 | Linux (Ubuntu Server 16.04 recommended) | [Control Plane]({{}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{}}/rancher/v2.x/en/cluster-provisioning/#etcd), [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) +Node 2 | Linux (Ubuntu Server 16.04 recommended) | [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) (This node is used for Ingress support) +Node 3 | Windows (Windows Server core version 1809 or above) | [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) ### Requirements -- You can view node requirements for Linux and Windows nodes in the [installation section]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/). +- You can view node requirements for Linux and Windows nodes in the [installation section]({{}}/rancher/v2.x/en/installation/requirements/). - All nodes in a virtualization cluster or a bare metal cluster must be connected using a layer 2 network. - To support [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), your cluster must include at least one Linux node dedicated to the worker role. - Although we recommend the three node architecture listed in the table above, you can add additional Linux and Windows workers to scale up your cluster for redundancy. 
@@ -79,20 +79,20 @@ Azure VM | [Enable or Disable IP Forwarding](https://docs.microsoft.com/en-us/az ## 3. Create the Custom Cluster -To create a custom cluster that supports Windows nodes, follow the instructions in [Creating a Cluster with Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#2-create-the-custom-cluster), starting from [2. Create the Custom Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#2-create-the-custom-cluster). While completing the linked instructions, look for steps that requires special actions for Windows nodes, which are flagged with a note. These notes will link back here, to the special Windows instructions listed in the subheadings below. +To create a custom cluster that supports Windows nodes, follow the instructions in [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#2-create-the-custom-cluster), starting from [2. Create the Custom Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#2-create-the-custom-cluster). While completing the linked instructions, look for steps that requires special actions for Windows nodes, which are flagged with a note. These notes will link back here, to the special Windows instructions listed in the subheadings below. ### Enable the Windows Support Option While choosing **Cluster Options**, set **Windows Support (Experimental)** to **Enabled**. -After you select this option, resume [Creating a Cluster with Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#create-the-custom-cluster) from [step 6]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#step-6). 
+After you select this option, resume [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#create-the-custom-cluster) from [step 6]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#step-6). ### Networking Option When choosing a network provider for a cluster that supports Windows, the only option available is Flannel, as [host-gw](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) is needed for IP routing. -If your nodes are hosted by a cloud provider and you want automation support such as load balancers or persistent storage devices, see [Selecting Cloud Providers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) for configuration info. +If your nodes are hosted by a cloud provider and you want automation support such as load balancers or persistent storage devices, see [Selecting Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) for configuration info. ### Node Configuration @@ -103,7 +103,7 @@ Option | Setting Node Operating System | Linux Node Roles | etcd
Control Plane
Worker -When you're done with these configurations, resume [Creating a Cluster with Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#create-the-custom-cluster) from [step 8]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#step-8). +When you're done with these configurations, resume [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#create-the-custom-cluster) from [step 8]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#step-8). diff --git a/content/rancher/v2.x/en/contributing/_index.md b/content/rancher/v2.x/en/contributing/_index.md index 3965c2e7783..1cbf8bd694e 100644 --- a/content/rancher/v2.x/en/contributing/_index.md +++ b/content/rancher/v2.x/en/contributing/_index.md @@ -38,7 +38,7 @@ loglevel repository | https://github.com/rancher/loglevel | This repository is t To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. -![Rancher diagram]({{< baseurl >}}/img/rancher/ranchercomponentsdiagram.svg)
+![Rancher diagram]({{<baseurl>}}/img/rancher/ranchercomponentsdiagram.svg)
Rancher components used for provisioning/managing Kubernetes clusters. # Building diff --git a/content/rancher/v2.x/en/faq/networking/_index.md b/content/rancher/v2.x/en/faq/networking/_index.md index ef4a030f7a8..863ad97169d 100644 --- a/content/rancher/v2.x/en/faq/networking/_index.md +++ b/content/rancher/v2.x/en/faq/networking/_index.md @@ -5,5 +5,5 @@ weight: 8005 Networking FAQ's -- [CNI Providers]({{< baseurl >}}/rancher/v2.x/en/faq/networking/cni-providers/) +- [CNI Providers]({{}}/rancher/v2.x/en/faq/networking/cni-providers/) diff --git a/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md b/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md index 08ae7cf4f70..ec07fe5018d 100644 --- a/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md +++ b/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md @@ -10,7 +10,7 @@ CNI (Container Network Interface), a [Cloud Native Computing Foundation project] Kubernetes uses CNI as an interface between network providers and Kubernetes pod networking. -![CNI Logo]({{< baseurl >}}/img/rancher/cni-logo.png) +![CNI Logo]({{}}/img/rancher/cni-logo.png) For more information visit [CNI GitHub project](https://github.com/containernetworking/cni). @@ -28,7 +28,7 @@ This network model is used when an extended L2 bridge is preferred. This network CNI network providers using this network model include Flannel, Canal, and Weave. -![Encapsulated Network]({{< baseurl >}}/img/rancher/encapsulated-network.png) +![Encapsulated Network]({{}}/img/rancher/encapsulated-network.png) #### What is an Unencapsulated Network? @@ -40,7 +40,7 @@ This network model is used when a routed L3 network is preferred. This mode dyna CNI network providers using this network model include Calico and Romana. -![Unencapsulated Network]({{< baseurl >}}/img/rancher/unencapsulated-network.png) +![Unencapsulated Network]({{}}/img/rancher/unencapsulated-network.png) ### What CNI Providers are Provided by Rancher? 
@@ -48,7 +48,7 @@ Out-of-the-box, Rancher provides the following CNI network providers for Kuberne #### Canal -![Canal Logo]({{< baseurl >}}/img/rancher/canal-logo.png) +![Canal Logo]({{}}/img/rancher/canal-logo.png) Canal is a CNI network provider that gives you the best of Flannel and Calico. It allows users to easily deploy Calico and Flannel networking together as a unified networking solution, combining Calico’s network policy enforcement with the rich superset of Calico (unencapsulated) and/or Flannel (encapsulated) network connectivity options. @@ -62,7 +62,7 @@ For more information, see the [Canal GitHub Page.](https://github.com/projectcal #### Flannel -![Flannel Logo]({{< baseurl >}}/img/rancher/flannel-logo.png) +![Flannel Logo]({{}}/img/rancher/flannel-logo.png) Flannel is a simple and easy way to configure L3 network fabric designed for Kubernetes. Flannel runs a single binary agent named flanneld on each host, which is responsible for allocating a subnet lease to each host out of a larger, preconfigured address space. Flannel uses either the Kubernetes API or etcd directly to store the network configuration, the allocated subnets, and any auxiliary data (such as the host's public IP). Packets are forwarded using one of several backend mechanisms, with the default encapsulation being [VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). @@ -70,13 +70,13 @@ Encapsulated traffic is unencrypted by default. Therefore, flannel provides an e Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [the port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for more details. -![Flannel Diagram]({{< baseurl >}}/img/rancher/flannel-diagram.png) +![Flannel Diagram]({{}}/img/rancher/flannel-diagram.png) For more information, see the [Flannel GitHub Page](https://github.com/coreos/flannel). 
#### Calico -![Calico Logo]({{< baseurl >}}/img/rancher/calico-logo.png) +![Calico Logo]({{}}/img/rancher/calico-logo.png) Calico enables networking and network policy in Kubernetes clusters across the cloud. Calico uses a pure, unencapsulated IP network fabric and policy engine to provide networking for your Kubernetes workloads. Workloads are able to communicate over both cloud infrastructure and on-premise using BGP. @@ -84,7 +84,7 @@ Calico also provides a stateless IP-in-IP encapsulation mode that can be used, i Kubernetes workers should open TCP port `179` (BGP). See [the port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for more details. -![Calico Diagram]({{< baseurl >}}/img/rancher/calico-diagram.svg) +![Calico Diagram]({{}}/img/rancher/calico-diagram.svg) For more information, see the following pages: @@ -94,7 +94,7 @@ For more information, see the following pages: #### Weave -![Weave Logo]({{< baseurl >}}/img/rancher/weave-logo.png) +![Weave Logo]({{}}/img/rancher/weave-logo.png) _Available as of v2.2.0_ @@ -151,4 +151,4 @@ As of Rancher v2.0.7, Canal is the default CNI network provider. We recommend it ### How can I configure a CNI network provider? -Please see [Cluster Options]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) and the options for [Network Plug-ins]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/). +Please see [Cluster Options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/) on how to configure a network provider for your cluster. 
For more advanced configuration options, please see how to configure your cluster using a [Config File]({{<baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) and the options for [Network Plug-ins]({{<baseurl>}}/rke/latest/en/config-options/add-ons/network-plugins/). diff --git a/content/rancher/v2.x/en/faq/security/_index.md b/content/rancher/v2.x/en/faq/security/_index.md index 733b79dbf05..f9d6ec86452 100644 --- a/content/rancher/v2.x/en/faq/security/_index.md +++ b/content/rancher/v2.x/en/faq/security/_index.md @@ -6,10 +6,10 @@ weight: 8007 **Is there a Hardening Guide?** -The Hardening Guide is now located in the main [Security]({{< baseurl >}}/rancher/v2.x/en/security/) section. +The Hardening Guide is now located in the main [Security]({{<baseurl>}}/rancher/v2.x/en/security/) section.
**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** -We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security]({{< baseurl >}}/rancher/v2.x/en/security/) section. +We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security]({{<baseurl>}}/rancher/v2.x/en/security/) section. diff --git a/content/rancher/v2.x/en/faq/technical/_index.md b/content/rancher/v2.x/en/faq/technical/_index.md index e901475ca57..d5b22464771 100644 --- a/content/rancher/v2.x/en/faq/technical/_index.md +++ b/content/rancher/v2.x/en/faq/technical/_index.md @@ -116,7 +116,7 @@ Node Templates can be accessed by opening your account menu (top right) and sele ### Why is my Layer-4 Load Balancer in `Pending` state? -The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) +The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers]({{<baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/)
The va SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ellipsis button at the end of the row, and choose **Download Keys** as shown in the picture below. -![Download Keys]({{< baseurl >}}/img/rancher/downloadsshkeys.png) +![Download Keys]({{}}/img/rancher/downloadsshkeys.png) Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. Be sure to use the correct username (`rancher` or `docker` for RancherOS, `ubuntu` for Ubuntu, `ec2-user` for Amazon Linux) @@ -150,13 +150,13 @@ The UI consists of static files, and works based on responses of the API. That m A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. -When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes]({{< baseurl >}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) to clean the node. +When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes]({{}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) to clean the node. When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. ### How can I add additional arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? -You can add additional arguments/binds/environment variables via the [Config File]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) option in Cluster Options. 
For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables]({{< baseurl >}}/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls]({{< baseurl >}}/rke/latest/en/example-yamls/). +You can add additional arguments/binds/environment variables via the [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables]({{}}/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls]({{}}/rke/latest/en/example-yamls/). ### How do I check if my certificate chain is valid? diff --git a/content/rancher/v2.x/en/k8s-in-rancher/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/_index.md index 5b112b5725e..71830fc1f00 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/_index.md @@ -7,19 +7,19 @@ aliases: - /rancher/v2.x/en/concepts/resources/ --- -When your project is set up, [project members]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can start managing their applications and all the components that comprise it. +When your project is set up, [project members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can start managing their applications and all the components that comprise it. ## Workloads -Deploy applications to your cluster nodes using [workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/), which are objects that contain pods that run your apps, along with metadata that set rules for the deployment's behavior. Workloads can be deployed within the scope of the entire clusters or within a namespace. 
+Deploy applications to your cluster nodes using [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/), which are objects that contain pods that run your apps, along with metadata that set rules for the deployment's behavior. Workloads can be deployed within the scope of the entire clusters or within a namespace. -When deploying a workload, you can deploy from any image. There are a variety of [workload types]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/#workload-types) to choose from which determine how your application should run. +When deploying a workload, you can deploy from any image. There are a variety of [workload types]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/#workload-types) to choose from which determine how your application should run. Following a workload deployment, you can continue working with it. You can: -- [Upgrade]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads) the workload to a newer version of the application it's running. -- [Roll back]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads) a workload to a previous version, if an issue occurs during upgrade. -- [Add a sidecar]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar), which is a workload that supports a primary workload. +- [Upgrade]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads) the workload to a newer version of the application it's running. +- [Roll back]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads) a workload to a previous version, if an issue occurs during upgrade. +- [Add a sidecar]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar), which is a workload that supports a primary workload. 
## Load Balancing and Ingress @@ -31,10 +31,10 @@ If you want your applications to be externally accessible, you must add a load b Rancher supports two types of load balancers: -- [Layer-4 Load Balancers]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) +- [Layer-4 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) +- [Layer-7 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) -For more information, see [load balancers]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). +For more information, see [load balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). #### Ingress @@ -42,29 +42,29 @@ Load Balancers can only handle one IP address per service, which means if you ru Ingress is a set or rules that act as a load balancer. Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured. -For more information, see [Ingress]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). +For more information, see [Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. -For more information, see [Global DNS]({{< baseurl >}}/rancher/v2.x/en/catalog/globaldns/). +For more information, see [Global DNS]({{}}/rancher/v2.x/en/catalog/globaldns/). 
## Service Discovery After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolveable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labelled pods to a specific hostname. -For more information, see [Service Discovery]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/service-discovery). +For more information, see [Service Discovery]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/service-discovery). ## Pipelines -After your project has been [configured to a version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines/#version-control-providers), you can add the repositories and start configuring a pipeline for each repository. +After your project has been [configured to a version control provider]({{<baseurl>}}/rancher/v2.x/en/project-admin/pipelines/#version-control-providers), you can add the repositories and start configuring a pipeline for each repository. -For more information, see [Pipelines]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/). +For more information, see [Pipelines]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/pipelines/). ## Applications Besides launching individual components of an application, you can use the Rancher catalog to start launching applications, which are Helm charts. -For more information, see [Applications in a Project]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/). +For more information, see [Applications in a Project]({{<baseurl>}}/rancher/v2.x/en/catalog/apps/). ## Kubernetes Resources @@ -72,7 +72,7 @@ Within the context of a Rancher project or namespace, _resources_ are files and Resources include: -- [Certificates]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/certificates/): Files used to encrypt/decrypt data entering or leaving the cluster. 
-- [ConfigMaps]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/configmaps/): Files that store general configuration information, such as a group of config files. -- [Secrets]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/secrets/): Files that store sensitive data like passwords, tokens, or keys. -- [Registries]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/registries/): Files that carry credentials used to authenticate with private registries. +- [Certificates]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/certificates/): Files used to encrypt/decrypt data entering or leaving the cluster. +- [ConfigMaps]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/configmaps/): Files that store general configuration information, such as a group of config files. +- [Secrets]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/secrets/): Files that store sensitive data like passwords, tokens, or keys. +- [Registries]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/registries/): Files that carry credentials used to authenticate with private registries. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/certificates/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/certificates/_index.md index e4c3b501564..2ab2329eeeb 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/certificates/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/certificates/_index.md @@ -24,7 +24,7 @@ Add SSL certificates to either projects, namespaces, or both. A project scoped c - **Available to all namespaces in this project:** The certificate is available for any deployment in any namespaces in the project. - - **Available to a single namespace:** The certificate is only available for the deployments in one [namespace]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). If you choose this option, select a **Namespace** from the drop-down list or click **Add to a new namespace** to add the certificate to a namespace you create on the fly. 
+ - **Available to a single namespace:** The certificate is only available for the deployments in one [namespace]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). If you choose this option, select a **Namespace** from the drop-down list or click **Add to a new namespace** to add the certificate to a namespace you create on the fly. 1. From **Private Key**, either copy and paste your certificate's private key into the text box (include the header and footer), or click **Read from a file** to browse to the private key on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. @@ -42,4 +42,4 @@ Add SSL certificates to either projects, namespaces, or both. A project scoped c ## What's Next? -Now you can add the certificate when launching an ingress within the current project or namespace. For more information, see [Adding Ingress]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/). +Now you can add the certificate when launching an ingress within the current project or namespace. For more information, see [Adding Ingress]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md index ea62cc86e4f..acd710150de 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md @@ -26,7 +26,7 @@ ConfigMaps accept key value pairs in common string formats, like config files or 1. Click **Save**. - >**Note:** Don't use ConfigMaps to store sensitive data [use a secret]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/secrets/). + >**Note:** Don't use ConfigMaps to store sensitive data [use a secret]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/secrets/). > >**Tip:** You can add multiple key value pairs to the ConfigMap by copying and pasting. 
> @@ -41,4 +41,4 @@ Now that you have a ConfigMap added to a namespace, you can add it to a workload - Application environment variables. - Specifying parameters for a Volume mounted to the workload. -For more information on adding ConfigMaps to a workload, see [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). +For more information on adding ConfigMaps to a workload, see [Deploying Workloads]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md index 2301619cd7a..b5f6ea2d0b2 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md @@ -19,17 +19,17 @@ The way that you manage HPAs is different based on your version of the Kubernete HPAs are also managed differently based on your version of Rancher: -- **For Rancher v2.3.0+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). -- **For Rancher Prior to v2.3.0:** To manage and configure HPAs, you need to use `kubectl`. 
For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl). +- **For Rancher v2.3.0+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). +- **For Rancher Prior to v2.3.0:** To manage and configure HPAs, you need to use `kubectl`. For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl). You might have additional HPA installation steps if you are using an older version of Rancher: - **For Rancher v2.0.7+:** Clusters created in Rancher v2.0.7 and higher automatically have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use HPA. -- **For Rancher Prior to v2.0.7:** Clusters created in Rancher prior to v2.0.7 don't automatically have the requirements needed to use HPA. For instructions on installing HPA for these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). +- **For Rancher Prior to v2.0.7:** Clusters created in Rancher prior to v2.0.7 don't automatically have the requirements needed to use HPA. 
For instructions on installing HPA for these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). ## Testing HPAs with a Service Deployment -In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). +In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). You can also use `kubectl` to get the status of HPAs that you test with your load testing tool. For more information, refer to [Testing HPAs with kubectl] -({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/). +({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/). 
diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md index 222b0cb3d8c..d0d487a49ed 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md @@ -20,7 +20,7 @@ HPA improves your services by: ## How HPA Works -![HPA Schema]({{< baseurl >}}/img/rancher/horizontal-pod-autoscaler.jpg) +![HPA Schema]({{}}/img/rancher/horizontal-pod-autoscaler.jpg) HPA is implemented as a control loop, with a period controlled by the `kube-controller-manager` flags below: diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md index 2d3cf10c87c..0d19fa185e2 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md @@ -13,11 +13,11 @@ This section describes HPA management with `kubectl`. This document has instruct ### Note For Rancher v2.3.x -In Rancher v2.3.x, you can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). For scaling HPAs based on other metrics than CPU or memory, you still need `kubectl`. +In Rancher v2.3.x, you can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. 
For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). For scaling HPAs based on other metrics than CPU or memory, you still need `kubectl`. ### Note For Rancher Prior to v2.0.7 -Clusters created with older versions of Rancher don't automatically have all the requirements to create an HPA. To install an HPA on these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). +Clusters created with older versions of Rancher don't automatically have all the requirements to create an HPA. To install an HPA on these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). ##### Basic kubectl Command for Managing HPAs diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md index 5a3af016138..6b812cf8dd7 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md @@ -7,7 +7,7 @@ _Available as of v2.3.0_ The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. -If you want to create HPAs that scale based on other metrics than CPU and memory, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). 
+If you want to create HPAs that scale based on other metrics than CPU and memory, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). ## Creating an HPA @@ -25,7 +25,7 @@ If you want to create HPAs that scale based on other metrics than CPU and memory 1. Specify the **Minimum Scale** and **Maximum Scale** for the HPA. -1. Configure the metrics for the HPA. You can choose memory or CPU usage as the metric that will cause the HPA to scale the service up or down. In the **Quantity** field, enter the percentage of the workload's memory or CPU usage that will cause the HPA to scale the service. To configure other HPA metrics, including metrics available from Prometheus, you need to [manage HPAs using kubectl]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). +1. Configure the metrics for the HPA. You can choose memory or CPU usage as the metric that will cause the HPA to scale the service up or down. In the **Quantity** field, enter the percentage of the workload's memory or CPU usage that will cause the HPA to scale the service. To configure other HPA metrics, including metrics available from Prometheus, you need to [manage HPAs using kubectl]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). 1. Click **Create** to create the HPA. 
diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md index cb49344658d..7df9409d618 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md @@ -3,7 +3,7 @@ title: Testing HPAs with kubectl weight: 3031 --- -This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (at least version 2.3.x), refer to [Managing HPAs with the Rancher UI]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/). +This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (at least version 2.3.x), refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/). For HPA to work correctly, service deployments should have resources request definitions for containers. Follow this hello-world example to test if HPA is working correctly. 
diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md index 096c69a6c17..6c56007e544 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md @@ -14,10 +14,10 @@ If you want your applications to be externally accessible, you must add a load b Rancher supports two types of load balancers: -- [Layer-4 Load Balancers]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) +- [Layer-4 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) +- [Layer-7 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) -For more information, see [load balancers]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). +For more information, see [load balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). ### Load Balancer Limitations @@ -28,9 +28,9 @@ Load Balancers have a couple of limitations you should be aware of: - If you want to use a load balancer with a Hosted Kubernetes cluster (i.e., clusters hosted in GKE, EKS, or AKS), the load balancer must be running within that cloud provider's infrastructure. 
Please review the compatibility tables regarding support for load balancers based on how you've provisioned your clusters: - - [Support for Layer-4 Load Balancing]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-4-load-balancing) + - [Support for Layer-4 Load Balancing]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-4-load-balancing) - - [Support for Layer-7 Load Balancing]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-7-load-balancing) + - [Support for Layer-7 Load Balancing]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-7-load-balancing) ## Ingress @@ -48,7 +48,7 @@ Ingress works in conjunction with one or more ingress controllers to dynamically Each Kubernetes Ingress resource corresponds roughly to a file in `/etc/nginx/sites-available/` containing a `server{}` configuration block, where requests for specific files and folders are configured. -Your ingress, which creates a port of entry to your cluster similar to a load balancer, can reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launcher clusters are powered by [Nginx](https://www.nginx.com/). +Your ingress, which creates a port of entry to your cluster similar to a load balancer, can reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launched clusters are powered by [Nginx](https://www.nginx.com/). Ingress can provide other functionality as well, such as SSL termination, name-based virtual hosting, and more. @@ -56,6 +56,6 @@ Ingress can provide other functionality as well, such as SSL termination, name-b > >Refrain from adding an Ingress to the `local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global entry point for _all_ clusters managed by Rancher, including the `local` cluster. 
Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. -- For more information on how to set up ingress in Rancher, see [Ingress]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). +- For more information on how to set up ingress in Rancher, see [Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). - For complete information about ingress and ingress controllers, see the [Kubernetes Ingress Documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) -- When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry, see [Global DNS]({{< baseurl >}}/rancher/v2.x/en/catalog/globaldns/). +- When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry, see [Global DNS]({{}}/rancher/v2.x/en/catalog/globaldns/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md index d90fc336f02..4392f9fedd8 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md @@ -6,7 +6,7 @@ aliases: - /rancher/v2.x/en/tasks/workloads/add-ingress/ --- -Ingress can be added for workloads to provide load balancing, SSL termination and host/path based routing. When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{< baseurl >}}/rancher/v2.x/en/catalog/globaldns/). +Ingress can be added for workloads to provide load balancing, SSL termination and host/path based routing. 
When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{}}/rancher/v2.x/en/catalog/globaldns/). 1. From the **Global** view, open the project that you want to add ingress to. @@ -14,7 +14,7 @@ Ingress can be added for workloads to provide load balancing, SSL termination an 1. Enter a **Name** for the ingress. -1. Select an existing **Namespace** from the drop-down list. Alternatively, you can create a new [namespace]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) on the fly by clicking **Add to a new namespace**. +1. Select an existing **Namespace** from the drop-down list. Alternatively, you can create a new [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) on the fly by clicking **Add to a new namespace**. 1. Create ingress forwarding **Rules**. @@ -65,7 +65,7 @@ Ingress can be added for workloads to provide load balancing, SSL termination an 1. If any of your ingress rules handle requests for encrypted ports, add a certificate to encrypt/decrypt communications. - >**Note:** You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information see [Adding SSL Certificates]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/certificates/). + >**Note:** You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information see [Adding SSL Certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/). 1. Click **Add Certificate**. 
diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md index 9edfc95f878..7ae7742018d 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md @@ -67,8 +67,8 @@ The benefit of using xip.io is that you obtain a working entrypoint URL immediat #### Tutorials -- [Kubernetes installation with External Load Balancer (HTTPS/Layer 7)]({{< baseurl >}}/rancher/v2.x/en/installation/ha-server-install-external-lb) -- [Kubernetes installation with External Load Balancer (TCP/Layer 4)]({{< baseurl >}}/rancher/v2.x/en/installation/ha-server-install) -- [Docker Installation with External Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/single-node-install-external-lb) +- [Kubernetes installation with External Load Balancer (HTTPS/Layer 7)]({{}}/rancher/v2.x/en/installation/ha-server-install-external-lb) +- [Kubernetes installation with External Load Balancer (TCP/Layer 4)]({{}}/rancher/v2.x/en/installation/ha-server-install) +- [Docker Installation with External Load Balancer]({{}}/rancher/v2.x/en/installation/single-node-install-external-lb) diff --git a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md index e39437f23a7..bfe10301d3f 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md @@ -9,11 +9,11 @@ aliases: >**Notes:** > >- Pipelines are new and improved for Rancher v2.1! Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. ->- Still using v2.0.x? See the pipeline documentation for [previous versions]({{< baseurl >}}/rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x). 
+>- Still using v2.0.x? See the pipeline documentation for [previous versions]({{}}/rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x). -Before setting up any pipelines, review the [pipeline overview]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines/) and ensure that the project has [configured authentication to your version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines/#version-control-providers), e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example/) to view some common pipeline deployments. +Before setting up any pipelines, review the [pipeline overview]({{}}/rancher/v2.x/en/project-admin/pipelines/) and ensure that the project has [configured authentication to your version control provider]({{}}/rancher/v2.x/en/project-admin/pipelines/#version-control-providers), e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example/) to view some common pipeline deployments. -If you can access a project, you can enable repositories to start building pipelines. Only an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owner or member]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can authorize version control providers. +If you can access a project, you can enable repositories to start building pipelines. 
Only an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owner or member]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can authorize version control providers. ## Concepts @@ -131,7 +131,7 @@ stages: 1. _Available as of v2.2.0_ - **Notifications:** Decide if you want to set up notifications for your pipeline. You can enable notifications to any [notifiers]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) based on the build status of a pipeline. Before enabling notifications, Rancher recommends [setting up notifiers]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers) so it will be easy to add recipients immediately. + **Notifications:** Decide if you want to set up notifications for your pipeline. You can enable notifications to any [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) based on the build status of a pipeline. Before enabling notifications, Rancher recommends [setting up notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers) so it will be easy to add recipients immediately. {{% accordion id="notification" label="Configuring Notifications" %}} @@ -145,7 +145,7 @@ _Available as of v2.2.0_ 1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. -1. If you don't have any existing [notifiers]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers), Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers) to add a notifier. 
If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. +1. If you don't have any existing [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers), Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. > **Note:** Notifiers are configured at a cluster level and require a different level of permissions. @@ -233,7 +233,7 @@ timeout: 30 Run your pipeline for the first time. From the project view in Rancher, go to **Resources > Pipelines.** (In versions prior to v2.3.0, go to the **Pipelines** tab.) Find your pipeline and select the vertical **Ellipsis (...) > Run**. -During this initial run, your pipeline is tested, and the following [pipeline components]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines/#how-pipelines-work) are deployed to your project as workloads in a new namespace dedicated to the pipeline: +During this initial run, your pipeline is tested, and the following [pipeline components]({{}}/rancher/v2.x/en/project-admin/pipelines/#how-pipelines-work) are deployed to your project as workloads in a new namespace dedicated to the pipeline: - `docker-registry` - `jenkins` @@ -251,7 +251,7 @@ Available Events: * **Pull Request**: Whenever a pull request is made to the repository, the pipeline is triggered. * **Tag**: When a tag is created in the repository, the pipeline is triggered. -> **Note:** This option doesn't exist for Rancher's [example repositories]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example/). +> **Note:** This option doesn't exist for Rancher's [example repositories]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example/). 
### Modifying the Event Triggers for the Repository @@ -374,7 +374,7 @@ stages: _Available as of v2.2.0_ -The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a [git hosted chart repository]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/). It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) is supported for any file in the chart folder. +The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a [git hosted chart repository]({{}}/rancher/v2.x/en/catalog/custom/). It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) are supported for any file in the chart folder. {{% tabs %}} @@ -691,7 +691,7 @@ stages: ### Secrets -If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/secrets/). +If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/). #### Prerequisite Create a secret in the same project as your pipeline, or explicitly in the namespace where pipeline build pods run.
diff --git a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md index 00ddc2f207f..75619cd6702 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md @@ -11,7 +11,7 @@ Rancher ships with several example repositories that you can use to familiarize - Maven - php -> **Note:** The example repositories are only available if you have not [configured a version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines). +> **Note:** The example repositories are only available if you have not [configured a version control provider]({{}}/rancher/v2.x/en/project-admin/pipelines). ## Configure Repositories @@ -67,4 +67,4 @@ After enabling an example repository, run the pipeline to see how it works. ## What's Next? -For detailed information about setting up your own pipeline for your repository, [configure a version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines), [enable a repository](#configure-repositories) and finally [configure your pipeline]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration). +For detailed information about setting up your own pipeline for your repository, [configure a version control provider]({{}}/rancher/v2.x/en/project-admin/pipelines), [enable a repository](#configure-repositories) and finally [configure your pipeline]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration). 
diff --git a/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md index 80c621c65fb..76a6887a055 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md @@ -30,7 +30,7 @@ Currently, deployments pull the private registry credentials automatically only >**Note:** Kubernetes classifies secrets, certificates, ConfigMaps, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your registry must have a unique name among all secrets within your workspace. -1. Select a **Scope** for the registry. You can either make the registry available for the entire project or a single [namespace]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). +1. Select a **Scope** for the registry. You can either make the registry available for the entire project or a single [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). 1. Select the website that hosts your private registry. Then enter credentials that authenticate with the registry. For example, if you use DockerHub, provide your DockerHub username and password. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md index b6f31611d9e..88f2c9603af 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md @@ -25,7 +25,7 @@ When creating a secret, you can make it available for any deployment within a pr >**Note:** Kubernetes classifies secrets, certificates, ConfigMaps, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. 
Therefore, to prevent conflicts, your secret must have a unique name among all secrets within your workspace. -4. Select a **Scope** for the secret. You can either make the registry available for the entire project or a single [namespace]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). +4. Select a **Scope** for the secret. You can either make the secret available for the entire project or a single [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). 5. From **Secret Values**, click **Add Secret Value** to add a key value pair. Add as many values as you need. @@ -43,4 +43,4 @@ Any update to an active secrets won't automatically update the pods that are usi Now that you have a secret added to the project or namespace, you can add it to a workload that you deploy. -For more information on adding secret to a workload, see [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). +For more information on adding a secret to a workload, see [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md index 6b0b289ef04..09334ecc2a0 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md @@ -8,7 +8,7 @@ aliases: For every workload created, a complementing Service Discovery entry is created. This Service Discovery entry enables DNS resolution for the workload's pods using the following naming convention: `..svc.cluster.local`. -However, you also have the option of creating additional Service Discovery records.
You can use these additional records so that a given [namespace]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) resolves with one or more external IP addresses, an external hostname, an alias to another DNS record, other workloads, or a set of pods that match a selector that you create. +However, you also have the option of creating additional Service Discovery records. You can use these additional records so that a given [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) resolves with one or more external IP addresses, an external hostname, an alias to another DNS record, other workloads, or a set of pods that match a selector that you create. 1. From the **Global** view, open the project that you want to add a DNS record to. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/_index.md index 617929af284..3eed10c697d 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/workloads/_index.md @@ -71,9 +71,9 @@ There are several types of services available in Rancher. The descriptions below This section of the documentation contains instructions for deploying workloads and using workload options. 
-- [Deploy Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/) -- [Upgrade Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/) -- [Rollback Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/) +- [Deploy Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/) +- [Upgrade Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/) +- [Rollback Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/) ## Related Links diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md index 123c2fd295f..b899d310fe6 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md @@ -14,25 +14,25 @@ Deploy a workload to run an application in one or more containers. 1. Enter a **Name** for the workload. -1. Select a [workload type]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/). The workload defaults to a scalable deployment, by can change the workload type by clicking **More options.** +1. Select a [workload type]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/). The workload defaults to a scalable deployment, but you can change the workload type by clicking **More options.** 1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project, optionally prefacing it with the registry host (e.g. `quay.io`, `registry.gitlab.com`, etc.). During deployment, Rancher pulls this image from the specified public or private registry. If no registry host is provided, Rancher will pull the image from [Docker Hub](https://hub.docker.com/explore/).
Enter the name exactly as it appears in the registry server, including any required path, and optionally including the desired tag (e.g. `registry.gitlab.com/user/path/image:tag`). If no tag is provided, the `latest` tag will be automatically used. -1. Either select an existing [namespace]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces), or click **Add to a new namespace** and enter a new namespace. +1. Either select an existing [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces), or click **Add to a new namespace** and enter a new namespace. -1. Click **Add Port** to enter a port mapping, which enables access to the application inside and outside of the cluster . For more information, see [Services]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/#services). +1. Click **Add Port** to enter a port mapping, which enables access to the application inside and outside of the cluster. For more information, see [Services]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/#services). 1. Configure the remaining options: - **Environment Variables** - Use this section to either specify environment variables for your workload to consume on the fly, or to pull them from another source, such as a secret or [ConfigMap]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/configmaps/). + Use this section to either specify environment variables for your workload to consume on the fly, or to pull them from another source, such as a secret or [ConfigMap]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/). - **Node Scheduling** - **Health Check** - **Volumes** - Use this section to add storage for your workload. You can manually specify the volume that you want to add, use a persistent volume claim to dynamically create a volume for the workload, or read data for a volume to use from a file such as a [ConfigMap]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/configmaps/).
+ Use this section to add storage for your workload. You can manually specify the volume that you want to add, use a persistent volume claim to dynamically create a volume for the workload, or read data for a volume to use from a file such as a [ConfigMap]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/). When you are deploying a Stateful Set, you should use a Volume Claim Template when using Persistent Volumes. This will ensure that Persistent Volumes are created dynamically when you scale your Stateful Set. This option is available in the UI as of Rancher v2.2.0. @@ -44,7 +44,7 @@ Deploy a workload to run an application in one or more containers. > >- In [Amazon AWS](https://aws.amazon.com/), the nodes must be in the same Availability Zone and possess IAM permissions to attach/unattach volumes. > - >- The cluster must be using the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws) option. For more information on enabling this option see [Creating an Amazon EC2 Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) or [Creating a Custom Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/). + >- The cluster must be using the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws) option. For more information on enabling this option see [Creating an Amazon EC2 Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) or [Creating a Custom Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/). 1. 
Click **Show Advanced Options** and configure: diff --git a/content/rancher/v2.x/en/overview/_index.md b/content/rancher/v2.x/en/overview/_index.md index 92c84b5cb81..9a6b66224c3 100644 --- a/content/rancher/v2.x/en/overview/_index.md +++ b/content/rancher/v2.x/en/overview/_index.md @@ -22,7 +22,7 @@ Rancher provides an intuitive user interface for DevOps engineers to manage thei The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. -![Platform]({{< baseurl >}}/img/rancher/platform.png) +![Platform]({{}}/img/rancher/platform.png) # Features of the Rancher API Server @@ -54,7 +54,7 @@ The Rancher API server is built on top of an embedded Kubernetes API server and # Editing Downstream Clusters with Rancher -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. +The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. 
After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) diff --git a/content/rancher/v2.x/en/overview/architecture-recommendations/_index.md b/content/rancher/v2.x/en/overview/architecture-recommendations/_index.md index 016f7a8ce62..eb13e5562a9 100644 --- a/content/rancher/v2.x/en/overview/architecture-recommendations/_index.md +++ b/content/rancher/v2.x/en/overview/architecture-recommendations/_index.md @@ -45,14 +45,14 @@ We recommend the following configurations for the load balancer and Ingress cont * The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment.
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers
-![Rancher HA]({{< baseurl >}}/img/rancher/ha/rancher2ha.svg) +![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) Rancher installed on a Kubernetes cluster with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers # Environment for Kubernetes Installations It is strongly recommended to install Rancher on a Kubernetes cluster on hosted infrastructure such as Amazon's EC2 or Google Compute Engine. -For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. +For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. It is not recommended to install Rancher on top of a managed Kubernetes service such as Amazon’s EKS or Google Kubernetes Engine. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. diff --git a/content/rancher/v2.x/en/overview/architecture/_index.md b/content/rancher/v2.x/en/overview/architecture/_index.md index ffed8139364..8c05752602d 100644 --- a/content/rancher/v2.x/en/overview/architecture/_index.md +++ b/content/rancher/v2.x/en/overview/architecture/_index.md @@ -31,13 +31,13 @@ The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server The figure below illustrates the high-level architecture of Rancher 2.x. 
The figure depicts a Rancher Server installation that manages two downstream Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). -For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. +For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. The diagram below shows how users can manipulate both [Rancher-launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters and [hosted Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) clusters through Rancher's authentication proxy:
Managing Kubernetes Clusters through Rancher's Authentication Proxy
-![Architecture]({{< baseurl >}}/img/rancher/rancher-architecture-rancher-api-server.svg) +![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) You can install Rancher on a single node, or on a high-availability Kubernetes cluster. diff --git a/content/rancher/v2.x/en/project-admin/_index.md b/content/rancher/v2.x/en/project-admin/_index.md index 1fa9df84378..508e627147d 100644 --- a/content/rancher/v2.x/en/project-admin/_index.md +++ b/content/rancher/v2.x/en/project-admin/_index.md @@ -18,19 +18,19 @@ Rancher projects resolve this issue by allowing you to apply resources and acces You can use projects to perform actions like: -- [Assign users access to a group of namespaces]({{< baseurl >}}/rancher/v2.x/en/project-admin/project-members) -- Assign users [specific roles in a project]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). A role can be owner, member, read-only, or [custom]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/) -- [Set resource quotas]({{< baseurl >}}/rancher/v2.x/en/project-admin/resource-quotas/) -- [Manage namespaces]({{< baseurl >}}/rancher/v2.x/en/project-admin/namespaces/) -- [Configure tools]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/) +- [Assign users access to a group of namespaces]({{}}/rancher/v2.x/en/project-admin/project-members) +- Assign users [specific roles in a project]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). 
A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/) +- [Set resource quotas]({{}}/rancher/v2.x/en/project-admin/resource-quotas/) +- [Manage namespaces]({{}}/rancher/v2.x/en/project-admin/namespaces/) +- [Configure tools]({{}}/rancher/v2.x/en/project-admin/tools/) - [Set up pipelines for continuous integration and deployment]({{}}/rancher/v2.x/en/project-admin/pipelines) - [Configure pod security policies]({{}}/rancher/v2.x/en/project-admin/pod-security-policies) ### Authorization -Non-administrative users are only authorized for project access after an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owner or member]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) adds them to the project's **Members** tab. +Non-administrative users are only authorized for project access after an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owner or member]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) adds them to the project's **Members** tab. -Whoever creates the project automatically becomes a [project owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). +Whoever creates the project automatically becomes a [project owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). 
## Switching between Projects diff --git a/content/rancher/v2.x/en/project-admin/namespaces/_index.md b/content/rancher/v2.x/en/project-admin/namespaces/_index.md index b8a400c9a79..82b308daf17 100644 --- a/content/rancher/v2.x/en/project-admin/namespaces/_index.md +++ b/content/rancher/v2.x/en/project-admin/namespaces/_index.md @@ -9,14 +9,14 @@ Although you assign resources at the project level so that each namespace in the Resources that you can assign directly to namespaces include: -- [Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/) -- [Load Balancers/Ingress]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/) -- [Service Discovery Records]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/service-discovery/) -- [Persistent Volume Claims]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) -- [Certificates]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/certificates/) -- [ConfigMaps]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/configmaps/) -- [Registries]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/registries/) -- [Secrets]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/secrets/) +- [Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/) +- [Load Balancers/Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/) +- [Service Discovery Records]({{}}/rancher/v2.x/en/k8s-in-rancher/service-discovery/) +- [Persistent Volume Claims]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) +- [Certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/) +- [ConfigMaps]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/) +- [Registries]({{}}/rancher/v2.x/en/k8s-in-rancher/registries/) +- [Secrets]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/) To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. 
With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. @@ -27,7 +27,7 @@ To manage permissions in a vanilla Kubernetes cluster, cluster admins configure Create a new namespace to isolate apps and resources in a project. -When working with project resources that you can assign to a namespace (i.e., [workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/), [certificates]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/certificates/), [ConfigMaps]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/configmaps), etc.) you can create a namespace on the fly. +>**Tip:** When working with project resources that you can assign to a namespace (i.e., [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/), [certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/), [ConfigMaps]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps), etc.) you can create a namespace on the fly. 1. From the **Global** view, open the project where you want to create a namespace. @@ -35,7 +35,7 @@ When working with project resources that you can assign to a namespace (i.e., [w 1. From the main menu, select **Namespace**. The click **Add Namespace**. -1. **Optional:** If your project has [Resource Quotas]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) in effect, you can override the default resource **Limits** (which places a cap on the resources that the namespace can consume). +1. **Optional:** If your project has [Resource Quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) in effect, you can override the default resource **Limits** (which places a cap on the resources that the namespace can consume). 1. Enter a **Name** and then click **Create**. 
@@ -54,7 +54,7 @@ Cluster admins and members may occasionally need to move a namespace to another >**Notes:** > >- Don't move the namespaces in the `System` project. Moving these namespaces can adversely affect cluster networking. - >- You cannot move a namespace into a project that already has a [resource quota]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/) configured. + >- You cannot move a namespace into a project that already has a [resource quota]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/) configured. >- If you move a namespace from a project that has a quota set to a project with no quota set, the quota is removed from the namespace. 1. Choose a new project for the new namespace and then click **Move**. Alternatively, you can remove the namespace from all projects by selecting **None**. @@ -65,4 +65,4 @@ Cluster admins and members may occasionally need to move a namespace to another You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. -For more information, see how to [edit namespace resource quotas]({{< baseurl >}}/rancher/v2.x/en/project-admin//resource-quotas/override-namespace-default/#editing-namespace-resource-quotas). \ No newline at end of file +For more information, see how to [edit namespace resource quotas]({{}}/rancher/v2.x/en/project-admin//resource-quotas/override-namespace-default/#editing-namespace-resource-quotas). 
\ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/pipelines/_index.md b/content/rancher/v2.x/en/project-admin/pipelines/_index.md index 0c65147cd77..521b01f16de 100644 --- a/content/rancher/v2.x/en/project-admin/pipelines/_index.md +++ b/content/rancher/v2.x/en/project-admin/pipelines/_index.md @@ -39,13 +39,13 @@ Typically, pipeline stages include: After the artifacts are published, you would release your application so users could start using the updated product. -Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can [configure version control providers](#version-control-providers) and [manage global pipeline execution settings](#managing-global-pipeline-execution-settings). Project members can only configure [repositories]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#configuring-repositories) and [pipelines]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration). +Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can [configure version control providers](#version-control-providers) and [manage global pipeline execution settings](#managing-global-pipeline-execution-settings). Project members can only configure [repositories]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#configuring-repositories) and [pipelines]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration). > **Notes:** > > - Pipelines were improved in Rancher v2.1. 
Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. -> - Still using v2.0.x? See the pipeline documentation for [previous versions]({{< baseurl >}}/rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x). +> - Still using v2.0.x? See the pipeline documentation for [previous versions]({{<baseurl>}}/rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x). ## Overview @@ -95,7 +95,7 @@ After you configure a pipeline, you can trigger it using different methods: ## Version Control Providers -Before you can start [configuring a pipeline]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/) for your repository, you must configure and authorize a version control provider. +Before you can start [configuring a pipeline]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/pipelines/) for your repository, you must configure and authorize a version control provider. | Provider | Available as of | | --- | --- | @@ -182,11 +182,11 @@ _Available as of v2.2.0_ {{% /tab %}} {{% /tabs %}} -**Result:** After the version control provider is authenticated, you will be automatically re-directed to start [configuring which repositories]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#configuring-repositories) that you want start using with a pipeline. Once a repository is enabled, you can start to [configure the pipeline]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration). +**Result:** After the version control provider is authenticated, you will be automatically re-directed to start [configuring which repositories]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#configuring-repositories) that you want to start using with a pipeline. Once a repository is enabled, you can start to [configure the pipeline]({{<baseurl>}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration). 
## Managing Global Pipeline Execution Settings -After configuring a version control provider, there are several options that can be configured globally on how [pipelines]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/) are executed in Rancher. +After configuring a version control provider, there are several options that can be configured globally on how [pipelines]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/) are executed in Rancher. 1. From the **Global** view, navigate to the project that you want to configure pipelines. @@ -213,7 +213,7 @@ To configure compute resources for pipeline-step containers: You can configure compute resources for pipeline-step containers in the `.rancher-pipeline.yml` file. -In a [step type]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#step-types), you will provide the following information: +In a [step type]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#step-types), you will provide the following information: * **CPU Reservation (`CpuRequest`)**: CPU request for the container of a pipeline step. * **CPU Limit (`CpuLimit`)**: CPU limit for the container of a pipeline step. @@ -267,7 +267,7 @@ The internal [Docker registry](#how-pipelines-work) and the [Minio](#how-pipelin >**Prerequisites (for both parts A and B):** > ->[Persistent volumes]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#persistent-volumes) must be available for the cluster. +>[Persistent volumes]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#persistent-volumes) must be available for the cluster. ### A. Configuring Persistent Data for Docker Registry @@ -289,7 +289,7 @@ The internal [Docker registry](#how-pipelines-work) and the [Minio](#how-pipelin 1. Select a volume claim **Source**: - - If you select **Use a Storage Class to provision a new persistent volume**, select a [Storage Class]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#storage-classes) and enter a **Capacity**. 
+ - If you select **Use a Storage Class to provision a new persistent volume**, select a [Storage Class]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#storage-classes) and enter a **Capacity**. - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. 1. From the **Customize** section, choose the read/write access for the volume. @@ -334,7 +334,7 @@ The internal [Docker registry](#how-pipelines-work) and the [Minio](#how-pipelin 1. Select a volume claim **Source**: - - If you select **Use a Storage Class to provision a new persistent volume**, select a [Storage Class]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#storage-classes) and enter a **Capacity**. + - If you select **Use a Storage Class to provision a new persistent volume**, select a [Storage Class]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#storage-classes) and enter a **Capacity**. - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. 1. From the **Customize** section, choose the read/write access for the volume. diff --git a/content/rancher/v2.x/en/project-admin/pipelines/docs-for-v2.0.x/_index.md b/content/rancher/v2.x/en/project-admin/pipelines/docs-for-v2.0.x/_index.md index 5febdc414c6..3a7ea74b4ba 100644 --- a/content/rancher/v2.x/en/project-admin/pipelines/docs-for-v2.0.x/_index.md +++ b/content/rancher/v2.x/en/project-admin/pipelines/docs-for-v2.0.x/_index.md @@ -5,7 +5,7 @@ aliases: - /rancher/v2.x/en/project-admin/tools/pipelines/docs-for-v2.0.x --- ->**Note:** This section describes the pipeline feature as implemented in Rancher v2.0.x. If you are using Rancher v2.1 or later, where pipelines have been significantly improved, please refer to the new documentation for [v2.1 or later]({{< baseurl >}}/rancher/v2.x/en/tools/pipelines). +>**Note:** This section describes the pipeline feature as implemented in Rancher v2.0.x. 
If you are using Rancher v2.1 or later, where pipelines have been significantly improved, please refer to the new documentation for [v2.1 or later]({{<baseurl>}}/rancher/v2.x/en/tools/pipelines). diff --git a/content/rancher/v2.x/en/project-admin/pod-security-policies/_index.md b/content/rancher/v2.x/en/project-admin/pod-security-policies/_index.md index e92356c11c6..c5e7417df02 100644 --- a/content/rancher/v2.x/en/project-admin/pod-security-policies/_index.md +++ b/content/rancher/v2.x/en/project-admin/pod-security-policies/_index.md @@ -3,14 +3,14 @@ title: Pod Security Policies weight: 5600 --- -> These cluster options are only available for [clusters in which Rancher has launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). +> These cluster options are only available for [clusters in which Rancher has launched Kubernetes]({{<baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation. ### Prerequisites -- Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/). -- Assign a default Pod Security Policy to the project's cluster. +- Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instructions, see [Creating Pod Security Policies]({{<baseurl>}}/rancher/v2.x/en/admin-settings/pod-security-policies/). +- Assign a default Pod Security Policy to the project's cluster. 
You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [Existing Cluster: Adding a Pod Security Policy]({{}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters/#adding-changing-a-pod-security-policy). ### Applying a Pod Security Policy diff --git a/content/rancher/v2.x/en/project-admin/project-members/_index.md b/content/rancher/v2.x/en/project-admin/project-members/_index.md index 00c97f2098a..c1848a0de7c 100644 --- a/content/rancher/v2.x/en/project-admin/project-members/_index.md +++ b/content/rancher/v2.x/en/project-admin/project-members/_index.md @@ -10,11 +10,11 @@ If you want to provide a user with access and permissions to _specific_ projects You can add members to a project as it is created, or add them to an existing project. ->**Tip:** Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/cluster-members/) instead. +>**Tip:** Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members]({{}}/rancher/v2.x/en/cluster-provisioning/cluster-members/) instead. ### Adding Members to a New Project -You can add members to a project as you create it (recommended if possible). For details on creating a new project, refer to the [cluster administration section.]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) +You can add members to a project as you create it (recommended if possible). For details on creating a new project, refer to the [cluster administration section.]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) ### Adding Members to an Existing Project @@ -36,7 +36,7 @@ Following project creation, you can add users as project members so that they ca 1. Assign the user or group **Project** roles. 
- [What are Project Roles?]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) + [What are Project Roles?]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) >**Notes:** > @@ -44,8 +44,8 @@ Following project creation, you can add users as project members so that they ca > >- For `Custom` roles, you can modify the list of individual roles available for assignment. > - > - To add roles to the list, [Add a Custom Role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles). - > - To remove roles from the list, [Lock/Unlock Roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). + > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles). + > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). **Result:** The chosen users are added to the project. diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/_index.md index 03bcf25570a..ad9f464df25 100644 --- a/content/rancher/v2.x/en/project-admin/resource-quotas/_index.md +++ b/content/rancher/v2.x/en/project-admin/resource-quotas/_index.md @@ -9,15 +9,15 @@ In situations where several teams share a cluster, one team may overconsume the This page is a how-to guide for creating resource quotas in existing projects. -Resource quotas can also be set when a new project is created. For details, refer to the section on [creating new projects.]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/projects-and-namespaces/#creating-projects) +Resource quotas can also be set when a new project is created. 
For details, refer to the section on [creating new projects.]({{}}/rancher/v2.x/en/cluster-admin/projects-and-namespaces/#creating-projects) -> Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects). For details on how resource quotas work with projects in Rancher, refer to [this page.](./quotas-for-projects) +> Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects). For details on how resource quotas work with projects in Rancher, refer to [this page.](./quotas-for-projects) ### Applying Resource Quotas to Existing Projects _Available as of v2.0.1_ -Edit [resource quotas]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) when: +Edit [resource quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) when: - You want to limit the resources that a project and its namespaces can use. - You want to scale the resources available to a project up or down when a research quota is already in effect. @@ -30,7 +30,7 @@ Edit [resource quotas]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-a 1. Expand **Resource Quotas** and click **Add Quota**. Alternatively, you can edit existing quotas. -1. Select a [Resource Type]({{< baseurl >}}/rancher/v2.x/en/project-admin/resource-quotas/#resource-quota-types). +1. Select a [Resource Type]({{}}/rancher/v2.x/en/project-admin/resource-quotas/#resource-quota-types). 1. 
Enter values for the **Project Limit** and the **Namespace Default Limit**. diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/override-container-default/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/override-container-default/_index.md index bd9d1517459..b20230f7c14 100644 --- a/content/rancher/v2.x/en/project-admin/resource-quotas/override-container-default/_index.md +++ b/content/rancher/v2.x/en/project-admin/resource-quotas/override-container-default/_index.md @@ -13,7 +13,7 @@ To avoid setting these limits on each and every container during workload creati _Available as of v2.2.0_ -Edit [container default resource limit]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#setting-container-default-resource-limit) when: +Edit [container default resource limit]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#setting-container-default-resource-limit) when: - You have a CPU or Memory resource quota set on a project, and want to supply the corresponding default values for a container. - You want to edit the default container resource limit. diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/_index.md index 0501008f985..2d7f83b4162 100644 --- a/content/rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/_index.md +++ b/content/rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/_index.md @@ -5,16 +5,16 @@ weight: 2 Although the **Namespace Default Limit** propagates from the project to each namespace, in some cases, you may need to increase (or decrease) the performance for a specific namespace. In this situation, you can override the default limits by editing the namespace. -In the diagram below, the Rancher administrator has a resource quota in effect for their project. 
However, the administrator wants to override the namespace limits for `Namespace 3` so that it performs better. Therefore, the administrator [raises the namespace limits]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) for `Namespace 3` so that the namespace can access more resources. +In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it performs better. Therefore, the administrator [raises the namespace limits]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) for `Namespace 3` so that the namespace can access more resources. Namespace Default Limit Override -![Namespace Default Limit Override]({{< baseurl >}}/img/rancher/rancher-resource-quota-override.svg) +![Namespace Default Limit Override]({{}}/img/rancher/rancher-resource-quota-override.svg) -How to: [Editing Namespace Resource Quotas]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) +How to: [Editing Namespace Resource Quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) ### Editing Namespace Resource Quotas -If there is a [resource quota]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. +If there is a [resource quota]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. 1. From the **Global** view, open the cluster that contains the namespace for which you want to edit the resource quota. 
@@ -24,7 +24,7 @@ If there is a [resource quota]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pr 1. Edit the Resource Quota **Limits**. These limits determine the resources available to the namespace. The limits must be set within the configured project limits. - For more information about each **Resource Type**, see [Resource Quota Types]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#resource-quota-types). + For more information about each **Resource Type**, see [Resource Quota Types]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#resource-quota-types). >**Note:** > diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/_index.md index 73d7c180f80..3b1691f60b0 100644 --- a/content/rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/_index.md +++ b/content/rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/_index.md @@ -3,16 +3,16 @@ title: How Resource Quotas Work in Rancher Projects weight: 1 --- -Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects). +Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects). In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. 
Instead, the resource quota must be applied multiple times. In the following diagram, a Kubernetes administrator is trying to enforce a resource quota without Rancher. The administrator wants to apply a resource quota that sets the same CPU and memory limit to every namespace in his cluster (`Namespace 1-4`) . However, in the base version of Kubernetes, each namespace requires a unique resource quota. The administrator has to create four different resource quotas that have the same specs configured (`Resource Quota 1-4`) and apply them individually. Base Kubernetes: Unique Resource Quotas Being Applied to Each Namespace -![Native Kubernetes Resource Quota Implementation]({{< baseurl >}}/img/rancher/kubernetes-resource-quota.svg) +![Native Kubernetes Resource Quota Implementation]({{}}/img/rancher/kubernetes-resource-quota.svg) -Resource quotas are a little different in Rancher. In Rancher, you apply a resource quota to the [project]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects), and then the quota propagates to each namespace, whereafter Kubernetes enforces your limits using the native version of resource quotas. If you want to change the quota for a specific namespace, you can [override it](#overriding-the-default-limit-for-a-namespace). +Resource quotas are a little different in Rancher. In Rancher, you apply a resource quota to the [project]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects), and then the quota propagates to each namespace, whereafter Kubernetes enforces your limits using the native version of resource quotas. If you want to change the quota for a specific namespace, you can [override it](#overriding-the-default-limit-for-a-namespace). 
The resource quota includes two limits, which you set while creating or editing a project: @@ -28,7 +28,7 @@ The resource quota includes two limits, which you set while creating or editing In the following diagram, a Rancher administrator wants to apply a resource quota that sets the same CPU and memory limit for every namespace in their project (`Namespace 1-4`). However, in Rancher, the administrator can set a resource quota for the project (`Project Resource Quota`) rather than individual namespaces. This quota includes resource limits for both the entire project (`Project Limit`) and individual namespaces (`Namespace Default Limit`). Rancher then propagates the `Namespace Default Limit` quotas to each namespace (`Namespace Resource Quota`). Rancher: Resource Quotas Propagating to Each Namespace -![Rancher Resource Quota Implementation]({{< baseurl >}}/img/rancher/rancher-resource-quota.svg) +![Rancher Resource Quota Implementation]({{}}/img/rancher/rancher-resource-quota.svg) The following table explains the key differences between the two quota types. diff --git a/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md b/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md index fa9c7b0bdaa..786722a3827 100644 --- a/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md +++ b/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md @@ -9,7 +9,7 @@ Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://p Before you can receive alerts, one or more [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) must be configured at the cluster level. -Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can manage project alerts. 
+Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can manage project alerts. This section covers the following topics: @@ -20,7 +20,7 @@ This section covers the following topics: ## Alerts Scope -The scope for alerts can be set at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/alerts/) or project level. +The scope for alerts can be set at either the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) or project level. At the project level, Rancher monitors specific deployments and sends alerts for: @@ -123,13 +123,13 @@ This alert type monitors for the availability of all workloads marked with tags
_Available as of v2.2.4_ -If you enable [project monitoring]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/#monitoring), this alert type monitors for the overload from Prometheus expression querying. +If you enable [project monitoring]({{}}/rancher/v2.x/en/project-admin/tools/#monitoring), this alert type monitors for the overload from Prometheus expression querying. 1. Input or select an **Expression**, the drop down shows the original metrics from Prometheus, including: - [**Container**](https://github.com/google/cadvisor) - [**Kubernetes Resources**](https://github.com/kubernetes/kube-state-metrics) - - [**Customize**]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/monitoring/#project-metrics) + - [**Customize**]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#project-metrics) - [**Project Level Grafana**](http://docs.grafana.org/administration/metrics/) - **Project Level Prometheus** @@ -167,7 +167,7 @@ If you enable [project monitoring]({{< baseurl >}}/rancher/v2.x/en/project-admin 1. Continue adding more **Alert Rule** to the group. -1. Finally, choose the [notifiers]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) that send you alerts. +1. Finally, choose the [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) that send you alerts. - You can set up multiple notifiers. - You can change notifier recipients on the fly. diff --git a/content/rancher/v2.x/en/project-admin/tools/logging/_index.md b/content/rancher/v2.x/en/project-admin/tools/logging/_index.md index 5e842ce96c7..8c60ddf64eb 100644 --- a/content/rancher/v2.x/en/project-admin/tools/logging/_index.md +++ b/content/rancher/v2.x/en/project-admin/tools/logging/_index.md @@ -17,7 +17,7 @@ Rancher supports the following services: >**Note:** You can only configure one logging service per cluster or per project. 
-Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure Rancher to send Kubernetes logs to a logging service. +Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure Rancher to send Kubernetes logs to a logging service. ## Requirements @@ -41,7 +41,7 @@ Setting up a logging service to collect logs from your cluster/project has sever You can configure logging at either cluster level or project level. -- [Cluster logging]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/) writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. +- [Cluster logging]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. - Project logging writes logs for every pod in that particular project. @@ -59,11 +59,11 @@ Logs that are sent to your logging service are from the following locations: 1. Select a logging service and enter the configuration. Refer to the specific service for detailed configuration. 
Rancher supports the following services: - - [Elasticsearch]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/elasticsearch/) - - [Splunk]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/splunk/) - - [Kafka]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/kafka/) - - [Syslog]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/syslog/) - - [Fluentd]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/fluentd/) + - [Elasticsearch]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/elasticsearch/) + - [Splunk]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/splunk/) + - [Kafka]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/kafka/) + - [Syslog]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/syslog/) + - [Fluentd]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/fluentd/) 1. (Optional) Instead of using the UI to configure the logging services, you can enter custom advanced configurations by clicking on **Edit as File**, which is located above the logging targets. This link is only visible after you select a logging service. diff --git a/content/rancher/v2.x/en/project-admin/tools/monitoring/_index.md b/content/rancher/v2.x/en/project-admin/tools/monitoring/_index.md index 7174c065867..c5372b0ba6d 100644 --- a/content/rancher/v2.x/en/project-admin/tools/monitoring/_index.md +++ b/content/rancher/v2.x/en/project-admin/tools/monitoring/_index.md @@ -19,19 +19,19 @@ This section covers the following topics: ### Monitoring Scope -Using Prometheus, you can monitor Rancher at both the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and project level. For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. +Using Prometheus, you can monitor Rancher at both the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and project level. For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. 
-- [Cluster monitoring]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. +- [Cluster monitoring]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. - - [Kubernetes control plane]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#kubernetes-components-metrics) - - [etcd database]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#etcd-metrics) - - [All nodes (including workers)]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#cluster-metrics) + - [Kubernetes control plane]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#kubernetes-components-metrics) + - [etcd database]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#etcd-metrics) + - [All nodes (including workers)]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#cluster-metrics) - Project monitoring allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. ### Permissions to Configure Project Monitoring -Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure project level monitoring. Project members can only view monitoring metrics. 
+Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure project level monitoring. Project members can only view monitoring metrics. ### Enabling Project Monitoring @@ -41,7 +41,7 @@ Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global 1. Select **Tools > Monitoring** in the navigation bar. -1. Select **Enable** to show the [Prometheus configuration options]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/prometheus/). Enter in your desired configuration options. +1. Select **Enable** to show the [Prometheus configuration options]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/prometheus/). Enter in your desired configuration options. 1. Click **Save**. @@ -53,11 +53,11 @@ Prometheus|750m| 750Mi | 1000m | 1000Mi | Yes Grafana | 100m | 100Mi | 200m | 200Mi | No -**Result:** A single application,`project-monitoring`, is added as an [application]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/) to the project. After the application is `active`, you can start viewing [project metrics](#project-metrics) through the [Rancher dashboard]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#rancher-dashboard) or directly from [Grafana]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#grafana). +**Result:** A single application,`project-monitoring`, is added as an [application]({{}}/rancher/v2.x/en/catalog/apps/) to the project. After the application is `active`, you can start viewing [project metrics](#project-metrics) through the [Rancher dashboard]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#rancher-dashboard) or directly from [Grafana]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#grafana). 
### Project Metrics -[Workload metrics]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#workload-metrics) are available for the project if monitoring is enabled at the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and at the [project level.](#enabling-project-monitoring) +[Workload metrics]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#workload-metrics) are available for the project if monitoring is enabled at the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and at the [project level.](#enabling-project-monitoring) You can monitor custom metrics from any [exporters.](https://prometheus.io/docs/instrumenting/exporters/) You can also expose some custom endpoints on deployments without needing to configure Prometheus for your project. diff --git a/content/rancher/v2.x/en/quick-start-guide/_index.md b/content/rancher/v2.x/en/quick-start-guide/_index.md index 630450f42d2..be103b469ef 100644 --- a/content/rancher/v2.x/en/quick-start-guide/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/_index.md @@ -4,14 +4,14 @@ metaDescription: Use this section to jump start your Rancher deployment and test short title: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. weight: 25 --- ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{< baseurl >}}/rancher/v2.x/en/installation/). +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.x/en/installation/). Howdy buckaroos! 
Use this section of the docs to jump start your deployment and testing of Rancher 2.x! It contains instructions for a simple Rancher setup and some common use cases. We plan on adding more content to this section in the future. We have Quick Start Guides for: -- [Deploying Rancher Server]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/deployment/): Get started running Rancher using the method most convenient for you. +- [Deploying Rancher Server]({{}}/rancher/v2.x/en/quick-start-guide/deployment/): Get started running Rancher using the method most convenient for you. -- [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/workload/): Deploy a simple workload and expose it, letting you access it from outside the cluster. +- [Deploying Workloads]({{}}/rancher/v2.x/en/quick-start-guide/workload/): Deploy a simple workload and expose it, letting you access it from outside the cluster. -- [Using the CLI]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/cli/): Use `kubectl` or Rancher command line interface (CLI) to interact with your Rancher instance. +- [Using the CLI]({{}}/rancher/v2.x/en/quick-start-guide/cli/): Use `kubectl` or Rancher command line interface (CLI) to interact with your Rancher instance. diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/_index.md index 8023d181f51..3e9ddae02c2 100644 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/_index.md @@ -61,7 +61,7 @@ Two Kubernetes clusters are deployed into your AWS account, one running Rancher ### What's Next? -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/workload). +Use Rancher to create a deployment. 
For more information, see [Creating Deployments]({{<baseurl>}}/rancher/v2.x/en/quick-start-guide/workload). ## Destroying the Environment diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/_index.md index 3e1f67b37e2..95b4820090d 100644 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/_index.md @@ -59,7 +59,7 @@ Two Kubernetes clusters are deployed into your DigitalOcean account, one running ### What's Next? -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/workload). +Use Rancher to create a deployment. For more information, see [Creating Deployments]({{<baseurl>}}/rancher/v2.x/en/quick-start-guide/workload). diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md index ee362745c46..b4c2457eeaa 100644 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md @@ -38,7 +38,7 @@ This Quick Start Guide is divided into different tasks for easier consumption. > > For a full list of port requirements, refer to [Docker Installation]({{<baseurl>}}/rancher/v2.x/en/cluster-provisioning/node-requirements/). - Provision the host according to our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/). + Provision the host according to our [Requirements]({{<baseurl>}}/rancher/v2.x/en/installation/requirements/). ### 2. Install Rancher @@ -105,4 +105,4 @@ Congratulations! You have created your first cluster. #### What's Next? -Use Rancher to create a deployment. 
For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/workload). +Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.x/en/quick-start-guide/workload). diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/_index.md index c9bb875285a..bf8db298c3c 100644 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/_index.md @@ -29,7 +29,7 @@ The following steps quickly deploy a Rancher Server with a single node cluster a ### What's Next? -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/workload). +Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.x/en/quick-start-guide/workload). ## Destroying the Environment diff --git a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md index ebf52672472..df4b32406cc 100644 --- a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md @@ -77,6 +77,6 @@ Congratulations! You have successfully deployed a workload exposed via an ingres When you're done using your sandbox, destroy the Rancher Server and your cluster. 
See one of the following: -- [Amazon AWS: Destroying the Environment]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) -- [DigitalOcean: Destroying the Environment]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) -- [Vagrant: Destroying the Environment]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) +- [Amazon AWS: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) +- [DigitalOcean: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) +- [Vagrant: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) diff --git a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md index ace03022684..71d79215dd9 100644 --- a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md @@ -33,15 +33,15 @@ For this workload, you'll be deploying the application Rancher Hello-World. 9. From the **As a** drop-down, make sure that **NodePort (On every node)** is selected. - ![As a dropdown, NodePort (On every node selected)]({{< baseurl >}}/img/rancher/nodeport-dropdown.png) + ![As a dropdown, NodePort (On every node selected)]({{}}/img/rancher/nodeport-dropdown.png) 10. From the **On Listening Port** field, leave the **Random** value in place. - ![On Listening Port, Random selected]({{< baseurl >}}/img/rancher/listening-port-field.png) + ![On Listening Port, Random selected]({{}}/img/rancher/listening-port-field.png) 11. 
From the **Publish the container port** field, enter port `80`. - ![Publish the container port, 80 entered]({{< baseurl >}}/img/rancher/container-port-field.png) + ![Publish the container port, 80 entered]({{}}/img/rancher/container-port-field.png) 12. Leave the remaining options on their default setting. We'll tell you about them later. @@ -151,6 +151,6 @@ Congratulations! You have successfully deployed a workload exposed via a NodePor When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: -- [Amazon AWS: Destroying the Environment]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) -- [DigitalOcean: Destroying the Environment]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) -- [Vagrant: Destroying the Environment]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) +- [Amazon AWS: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) +- [DigitalOcean: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) +- [Vagrant: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) diff --git a/content/rancher/v2.x/en/security/_index.md b/content/rancher/v2.x/en/security/_index.md index b7a56c2fc96..8cae4eebe29 100644 --- a/content/rancher/v2.x/en/security/_index.md +++ b/content/rancher/v2.x/en/security/_index.md @@ -98,7 +98,7 @@ Rancher is committed to informing the community of security issues in our produc | ID | Description | Date | Resolution | |----|-------------|------|------------| -| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the 
`netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/). | +| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions]({{}}/rancher/v2.x/en/upgrades/rollbacks/). | | [CVE-2019-6287](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6287) | Project members continue to get access to namespaces from projects that they were removed from if they were added to more than one project. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) | | [CVE-2019-11202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11202) | The default admin, that is shipped with Rancher, will be re-created upon restart of Rancher despite being explicitly deleted. 
| 16 Apr 2019 | [Rancher v2.2.2](https://github.com/rancher/rancher/releases/tag/v2.2.2), [Rancher v2.1.9](https://github.com/rancher/rancher/releases/tag/v2.1.9) and [Rancher v2.0.14](https://github.com/rancher/rancher/releases/tag/v2.0.14) | | [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | diff --git a/content/rancher/v2.x/en/security/hardening-2.1/_index.md b/content/rancher/v2.x/en/security/hardening-2.1/_index.md index 890f17f35a8..0248d9f3f9d 100644 --- a/content/rancher/v2.x/en/security/hardening-2.1/_index.md +++ b/content/rancher/v2.x/en/security/hardening-2.1/_index.md @@ -15,7 +15,7 @@ Hardening Guide v2.1 | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes 1.11 [Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.1.x/Rancher_Hardening_Guide.pdf) -For more detail on how a hardened cluster scores against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.1.x]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.1/). +For more detail on how a hardened cluster scores against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.1.x]({{}}/rancher/v2.x/en/security/benchmark-2.1/). 
### Profile Definitions diff --git a/content/rancher/v2.x/en/security/hardening-2.2/_index.md b/content/rancher/v2.x/en/security/hardening-2.2/_index.md index 64db81ee176..de19613499f 100644 --- a/content/rancher/v2.x/en/security/hardening-2.2/_index.md +++ b/content/rancher/v2.x/en/security/hardening-2.2/_index.md @@ -15,7 +15,7 @@ Hardening Guide v2.2 | Rancher v2.2.x | Benchmark v1.4.1, 1.4.0 | Kubernetes 1.1 [Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.2.x/Rancher_Hardening_Guide.pdf) -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.2.x]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.2/). +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.2.x]({{<baseurl>}}/rancher/v2.x/en/security/benchmark-2.2/). ### Profile Definitions diff --git a/content/rancher/v2.x/en/security/hardening-2.3.3/_index.md b/content/rancher/v2.x/en/security/hardening-2.3.3/_index.md index 488c6fbcc4d..d25489d2e06 100644 --- a/content/rancher/v2.x/en/security/hardening-2.3.3/_index.md +++ b/content/rancher/v2.x/en/security/hardening-2.3.3/_index.md @@ -15,7 +15,7 @@ Hardening Guide v2.3.3 | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes 1.14, 1. [Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.3/Rancher_Hardening_Guide.pdf) -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide v2.3.3]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.3.3/). +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide v2.3.3]({{<baseurl>}}/rancher/v2.x/en/security/benchmark-2.3.3/). 
### Profile Definitions @@ -149,7 +149,7 @@ Verify that the permissions are `700` or more restrictive. **Remediation** -Follow the steps as documented in [1.4.12]({{< baseurl >}}/rancher/v2.x/en/security/hardening-2.3.3/#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. +Follow the steps as documented in [1.4.12]({{}}/rancher/v2.x/en/security/hardening-2.3.3/#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. ### 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` diff --git a/content/rancher/v2.x/en/security/hardening-2.3/_index.md b/content/rancher/v2.x/en/security/hardening-2.3/_index.md index dfa51e8eb20..f237643c192 100644 --- a/content/rancher/v2.x/en/security/hardening-2.3/_index.md +++ b/content/rancher/v2.x/en/security/hardening-2.3/_index.md @@ -14,7 +14,7 @@ Hardening Guide v2.3 | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes 1.1 [Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.x/Rancher_Hardening_Guide.pdf) -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.x]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.3/). +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.x]({{}}/rancher/v2.x/en/security/benchmark-2.3/). ### Profile Definitions @@ -411,7 +411,7 @@ Verify that the permissions are `700` or more restrictive. **Remediation** -Follow the steps as documented in [1.4.12]({{< baseurl >}}/rancher/v2.x/en/security/hardening-2.3/#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. 
+Follow the steps as documented in [1.4.12]({{}}/rancher/v2.x/en/security/hardening-2.3/#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. ### 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` diff --git a/content/rancher/v2.x/en/system-tools/_index.md b/content/rancher/v2.x/en/system-tools/_index.md index 10a48611e45..a34bc1a9b8d 100644 --- a/content/rancher/v2.x/en/system-tools/_index.md +++ b/content/rancher/v2.x/en/system-tools/_index.md @@ -3,7 +3,7 @@ title: System Tools weight: 6001 --- -System Tools is a tool to perform operational tasks on [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters or [RKE cluster as used for installing Rancher on Kubernetes]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/). The tasks include: +System Tools is a tool to perform operational tasks on [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters or [RKE cluster as used for installing Rancher on Kubernetes]({{}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/). The tasks include: * Collect logging and system metrics from nodes. * Remove Kubernetes resources created by Rancher. @@ -41,7 +41,7 @@ After you download the tools, complete the following actions: # Logs -The logs subcommand will collect log files of core Kubernetes cluster components from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) or nodes on an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/). See [Troubleshooting]({{< baseurl >}}//rancher/v2.x/en/troubleshooting/) for a list of core Kubernetes cluster components. 
+The logs subcommand will collect log files of core Kubernetes cluster components from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) or nodes on an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/). See [Troubleshooting]({{}}//rancher/v2.x/en/troubleshooting/) for a list of core Kubernetes cluster components. System Tools will use the provided kubeconfig file to deploy a DaemonSet, that will copy all the logfiles from the core Kubernetes cluster components and add them to a single tar file (`cluster-logs.tar` by default). If you only want to collect logging from a single node, you can specify the node by using `--node NODENAME` or `-n NODENAME`. @@ -81,7 +81,7 @@ The following are the options for the stats command: # Remove ->**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{< baseurl >}}/rancher/v2.x/en/backups/backups) before executing the command. +>**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{}}/rancher/v2.x/en/backups/backups) before executing the command. When you install Rancher on a Kubernetes cluster, it will create Kubernetes resources to run and to store configuration data. If you want to remove Rancher from your cluster, you can use the `remove` subcommand to remove the Kubernetes resources. When you use the `remove` subcommand, the following resources will be removed: @@ -101,7 +101,7 @@ When you install Rancher on a Kubernetes cluster, it will create Kubernetes reso When you run the command below, all the resources listed [above](#remove) will be removed from the cluster. ->**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{< baseurl >}}/rancher/v2.x/en/backups/backups) before executing the command. 
+>**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{}}/rancher/v2.x/en/backups/backups) before executing the command. ``` ./system-tools remove --kubeconfig --namespace diff --git a/content/rancher/v2.x/en/troubleshooting/_index.md b/content/rancher/v2.x/en/troubleshooting/_index.md index 7f6b30c3891..aa6c57afe66 100644 --- a/content/rancher/v2.x/en/troubleshooting/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/_index.md @@ -5,7 +5,7 @@ weight: 8100 This section contains information to help you troubleshoot issues when using Rancher. -- [Kubernetes components]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/kubernetes-components/) +- [Kubernetes components]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/) If you need help troubleshooting core Kubernetes cluster components like: * `etcd` @@ -16,22 +16,22 @@ This section contains information to help you troubleshoot issues when using Ran * `kube-proxy` * `nginx-proxy` -- [Kubernetes resources]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/) +- [Kubernetes resources]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/) Options for troubleshooting Kubernetes resources like Nodes, Ingress Controller and Rancher Agents are described in this section. -- [Networking]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/networking/) +- [Networking]({{}}/rancher/v2.x/en/troubleshooting/networking/) Steps to troubleshoot networking issues can be found here. -- [DNS]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/dns/) +- [DNS]({{}}/rancher/v2.x/en/troubleshooting/dns/) When you experience name resolution issues in your cluster. 
-- [Troubleshooting Rancher installed on Kubernetes]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/rancherha/) +- [Troubleshooting Rancher installed on Kubernetes]({{}}/rancher/v2.x/en/troubleshooting/rancherha/) - If you experience issues with your [Rancher server installed on Kubernetes]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/) + If you experience issues with your [Rancher server installed on Kubernetes]({{}}/rancher/v2.x/en/installation/k8s-install/) -- [Imported clusters]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/imported-clusters/) +- [Imported clusters]({{}}/rancher/v2.x/en/troubleshooting/imported-clusters/) - If you experience issues when [Importing Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) + If you experience issues when [Importing Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) diff --git a/content/rancher/v2.x/en/troubleshooting/dns/_index.md b/content/rancher/v2.x/en/troubleshooting/dns/_index.md index f64f6e5729b..ecbe88a7588 100644 --- a/content/rancher/v2.x/en/troubleshooting/dns/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/dns/_index.md @@ -7,7 +7,7 @@ The commands/steps listed on this page can be used to check name resolution issu Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. -Before running the DNS checks, check the [default DNS provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/networking/#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. 
+Before running the DNS checks, check the [default DNS provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly]({{}}/rancher/v2.x/en/troubleshooting/networking/#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. ### Check if DNS pods are running @@ -196,7 +196,7 @@ services: > **Note:** As the `kubelet` is running inside a container, the path for files located in `/etc` and `/usr` are in `/host/etc` and `/host/usr` inside the `kubelet` container. -See [Editing Cluster as YAML]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters/#editing-cluster-as-yaml) how to apply this change. When the provisioning of the cluster has finished, you have to remove the kube-dns pod to activate the new setting in the pod: +See [Editing Cluster as YAML]({{}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters/#editing-cluster-as-yaml) how to apply this change. When the provisioning of the cluster has finished, you have to remove the kube-dns pod to activate the new setting in the pod: ``` kubectl delete pods -n kube-system -l k8s-app=kube-dns diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md index 0c73699ee9f..d2e32f91537 100644 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md @@ -3,7 +3,7 @@ title: Kubernetes Components weight: 100 --- -The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. 
+The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{<baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. This section includes troubleshooting tips in the following categories: @@ -14,5 +14,5 @@ This section includes troubleshooting tips in the following categories: # Kubernetes Component Diagram -![Cluster diagram]({{< baseurl >}}/img/rancher/clusterdiagram.svg)
+![Cluster diagram]({{<baseurl>}}/img/rancher/clusterdiagram.svg)
Lines show the traffic flow between components. Colors are used purely for visual aid \ No newline at end of file diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md index a94b1a04ee7..1ca42591cf2 100644 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md @@ -29,7 +29,7 @@ bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." # Controlplane Container Logging -> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/#kubernetes-leader-election) how to retrieve the current leader. +> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/#kubernetes-leader-election) how to retrieve the current leader. The logging of the containers can contain information on what the problem could be. 
diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-resources/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-resources/_index.md index c8eae70b743..f4a6b8aecf1 100644 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-resources/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-resources/_index.md @@ -3,7 +3,7 @@ title: Kubernetes resources weight: 101 --- -The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. +The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. diff --git a/content/rancher/v2.x/en/troubleshooting/networking/_index.md b/content/rancher/v2.x/en/troubleshooting/networking/_index.md index d76fbf67773..7259b61a3e0 100644 --- a/content/rancher/v2.x/en/troubleshooting/networking/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/networking/_index.md @@ -112,7 +112,7 @@ If there is no output, the cluster is not affected. |------------|------------| | GitHub issue | [#15146](https://github.com/rancher/rancher/issues/15146) | -If pods in system namespaces cannot communicate with pods in other system namespaces, you will need to follow the instructions in [Upgrading to v2.0.7+ — Namespace Migration]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/) to restore connectivity. 
Symptoms include: +If pods in system namespaces cannot communicate with pods in other system namespaces, you will need to follow the instructions in [Upgrading to v2.0.7+ — Namespace Migration]({{}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/) to restore connectivity. Symptoms include: - NGINX ingress controller showing `504 Gateway Time-out` when accessed. - NGINX ingress controller logging `upstream timed out (110: Connection timed out) while connecting to upstream` when accessed. diff --git a/content/rancher/v2.x/en/upgrades/_index.md b/content/rancher/v2.x/en/upgrades/_index.md index 5fdcdc3dc16..1d0ce9190a5 100644 --- a/content/rancher/v2.x/en/upgrades/_index.md +++ b/content/rancher/v2.x/en/upgrades/_index.md @@ -7,7 +7,7 @@ aliases: ### Upgrading Rancher -- [Upgrades]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/) +- [Upgrades]({{}}/rancher/v2.x/en/upgrades/upgrades/) ### Rolling Back Unsuccessful Upgrades @@ -16,7 +16,7 @@ In the event that your Rancher Server does not upgrade successfully, you can rol - [Rollbacks for Rancher installed with Docker]({{}}/rancher/v2.x/en/upgrades/single-node-rollbacks) - [Rollbacks for Rancher installed on a Kubernetes cluster]({{}}/rancher/v2.x/en/upgrades/ha-server-rollbacks) -> **Note:** If you are rolling back to versions in either of these scenarios, you must follow some extra [instructions]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/) in order to get your clusters working. +> **Note:** If you are rolling back to versions in either of these scenarios, you must follow some extra [instructions]({{}}/rancher/v2.x/en/upgrades/rollbacks/) in order to get your clusters working. > >- Rolling back from v2.1.6+ to any version between v2.1.0 - v2.1.5 or v2.0.0 - v2.0.10. >- Rolling back from v2.0.11+ to any version between v2.0.0 - v2.0.10. 
diff --git a/content/rancher/v2.x/en/upgrades/rollbacks/_index.md b/content/rancher/v2.x/en/upgrades/rollbacks/_index.md index 245af441455..4a3c79a010a 100644 --- a/content/rancher/v2.x/en/upgrades/rollbacks/_index.md +++ b/content/rancher/v2.x/en/upgrades/rollbacks/_index.md @@ -32,7 +32,7 @@ Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.o 2. After executing the command a `tokens.json` file will be created. Important! Back up this file in a safe place.** You will need it to restore functionality to your clusters after rolling back Rancher. **If you lose this file, you may lose access to your clusters.** -3. Rollback Rancher following the [normal instructions]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/). +3. Rollback Rancher following the [normal instructions]({{}}/rancher/v2.x/en/upgrades/rollbacks/). 4. Once Rancher comes back up, every cluster managed by Rancher (except for Imported clusters) will be in an `Unavailable` state. diff --git a/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md b/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md index 3288777bd26..2cca7a4b78a 100644 --- a/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md +++ b/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md @@ -7,7 +7,7 @@ aliases: If you upgrade Rancher and the upgrade does not complete successfully, you may need to rollback your Rancher Server to its last healthy state. -To restore Rancher follow the procedure detailed here: [Restoring Backups — Kubernetes installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/ha-restoration) +To restore Rancher follow the procedure detailed here: [Restoring Backups — Kubernetes installs]({{}}/rancher/v2.x/en/backups/restorations/ha-restoration) Restoring a snapshot of the Rancher Server cluster will revert Rancher to the version and state at the time of the snapshot. 
diff --git a/content/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/_index.md b/content/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/_index.md index 0a041e08ae8..3ad649798e0 100644 --- a/content/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/_index.md +++ b/content/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/_index.md @@ -24,7 +24,7 @@ In this command, `` is the version of Rancher you were ru Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the [procedure below](#creating-a-backup). Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{< baseurl >}}/img/rancher/placeholder-ref-2.png) +![Placeholder Reference]({{}}/img/rancher/placeholder-ref-2.png) | Placeholder | Example | Description | | -------------------------- | -------------------------- | ------------------------------------------------------- | @@ -59,9 +59,9 @@ If you have issues upgrading Rancher, roll it back to its latest known healthy s ``` You can obtain the name for your Rancher container by entering `docker ps`. -1. Move the backup tarball that you created during completion of [Docker Upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. +1. Move the backup tarball that you created during completion of [Docker Upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - If you followed the naming convention we suggested in [Docker Upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/), it will have a name similar to (`rancher-data-backup--.tar.gz`). 
+ If you followed the naming convention we suggested in [Docker Upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/), it will have a name similar to (`rancher-data-backup--.tar.gz`). 1. Run the following command to replace the data in the `rancher-data` container with the data in the backup tarball, replacing the [placeholder](#before-you-start). Don't forget to close the quotes. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/_index.md index d83b0af6f5a..68539cc09c1 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/_index.md @@ -14,17 +14,17 @@ The following table lists some of the most noteworthy issues to be considered wh Upgrade Scenario | Issue ---|--- Upgrading to v2.3.0+ | Any user provisioned cluster will be automatically updated upon any edit as tolerations were added to the images used for Kubernetes provisioning. -Upgrading to v2.2.0-v2.2.x | Rancher introduced the [system charts](https://github.com/rancher/system-charts) repository which contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository locally and configure Rancher to use that repository. Please follow the instructions to [configure Rancher system charts]({{< baseurl >}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0). +Upgrading to v2.2.0-v2.2.x | Rancher introduced the [system charts](https://github.com/rancher/system-charts) repository which contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository locally and configure Rancher to use that repository. 
Please follow the instructions to [configure Rancher system charts]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0). Upgrading from v2.0.13 or earlier | If your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. -Upgrading from v2.0.7 or earlier | Rancher introduced the `system` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#preventing-cluster-networking-issues). +Upgrading from v2.0.7 or earlier | Rancher introduced the `system` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues]({{}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#preventing-cluster-networking-issues). ### Caveats -Upgrades _to_ or _from_ any chart in the [rancher-alpha repository]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories/) aren't supported. +Upgrades _to_ or _from_ any chart in the [rancher-alpha repository]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories/) aren't supported. 
### RKE Add-on Installs **Important: RKE add-on install is only supported up to Rancher v2.0.8** -Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). +Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). -If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha/_index.md index 8522ff2f813..7e74a35c0ed 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha/_index.md @@ -8,7 +8,7 @@ aliases: The following instructions will guide you through using Helm to upgrade a Rancher server that was installed on a Kubernetes cluster. -To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. 
+To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on). @@ -34,7 +34,7 @@ Follow the steps to upgrade Rancher server: ### A. Back up Your Kubernetes Cluster that is Running Rancher Server -[Take a one-time snapshot]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups/#option-b-one-time-snapshots) +[Take a one-time snapshot]({{}}/rancher/v2.x/en/backups/backups/ha-backups/#option-b-one-time-snapshots) of your Kubernetes cluster running Rancher server. You'll use the snapshot as a restoration point if something goes wrong during upgrade. ### B. Update the Helm chart repository @@ -47,7 +47,7 @@ of your Kubernetes cluster running Rancher server. You'll use the snapshot as a 1. Get the repository name that you used to install Rancher. - For information about the repos and their differences, see [Helm Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). + For information about the repos and their differences, see [Helm Chart Repositories]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). {{< release-channel >}} @@ -59,7 +59,7 @@ of your Kubernetes cluster running Rancher server. You'll use the snapshot as a rancher- https://releases.rancher.com/server-charts/ ``` - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/#switching-to-a-different-helm-chart-repository). 
If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. + > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{}}/rancher/v2.x/en/installation/options/server-tags/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. 1. Fetch the latest chart to install Rancher from the Helm chart repository. @@ -109,7 +109,7 @@ helm upgrade rancher rancher-/rancher \ If you are currently running the cert-manger whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manger due to the API change in cert-manger v0.11. -Please refer the [Upgrading Cert-Manager]({{< baseurl >}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) page for more information. +Please refer the [Upgrading Cert-Manager]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) page for more information. 1. Uninstall Rancher @@ -190,8 +190,8 @@ Log into Rancher to confirm that the upgrade succeeded. >**Having network issues following upgrade?** > -> See [Restoring Cluster Networking]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). +> See [Restoring Cluster Networking]({{}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). ## Rolling Back -Should something go wrong, follow the [roll back]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you preformed the upgrade. 
+Should something go wrong, follow the [roll back]({{<baseurl>}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you performed the upgrade. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha/helm2/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha/helm2/_index.md index 1c717cd0a03..f5d762cc5f5 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha/helm2/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha/helm2/_index.md @@ -11,7 +11,7 @@ weight: 1050 The following instructions will guide you through using Helm to upgrade a Rancher server that is installed on a Kubernetes cluster. -To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{<baseurl>}}/rke/latest/en/config-options/services/) or [add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{<baseurl>}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. +To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{<baseurl>}}/rke/latest/en/config-options/services/) or [add-ons]({{<baseurl>}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{<baseurl>}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade]({{<baseurl>}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on). @@ -37,7 +37,7 @@ Follow the steps to upgrade Rancher server: ### A. Back up Your Kubernetes Cluster that is Running Rancher Server -[Take a one-time snapshot]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups/#option-b-one-time-snapshots) +[Take a one-time snapshot]({{<baseurl>}}/rancher/v2.x/en/backups/backups/ha-backups/#option-b-one-time-snapshots) of your Kubernetes cluster running Rancher server. You'll use the snapshot as a restoration point if something goes wrong during upgrade. ### B. 
Update the Helm chart repository @@ -50,7 +50,7 @@ of your Kubernetes cluster running Rancher server. You'll use the snapshot as a 1. Get the repository name that you used to install Rancher. - For information about the repos and their differences, see [Helm Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). + For information about the repos and their differences, see [Helm Chart Repositories]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). {{< release-channel >}} @@ -62,7 +62,7 @@ of your Kubernetes cluster running Rancher server. You'll use the snapshot as a rancher- https://releases.rancher.com/server-charts/ ``` - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. + > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{}}/rancher/v2.x/en/installation/options/server-tags/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. 1. Fetch the latest chart to install Rancher from the Helm chart repository. @@ -110,7 +110,7 @@ helm upgrade rancher-/rancher \ If you are currently running the cert-manger whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manger due to the API change in cert-manger v0.11. 
-Please refer the [Upgrading Cert-Manager]({{< baseurl >}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) page for more information. +Please refer the [Upgrading Cert-Manager]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) page for more information. 1. Uninstall Rancher @@ -192,8 +192,8 @@ Log into Rancher to confirm that the upgrade succeeded. >**Having network issues following upgrade?** > -> See [Restoring Cluster Networking]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). +> See [Restoring Cluster Networking]({{}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). ## Rolling Back -Should something go wrong, follow the [roll back]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you preformed the upgrade. +Should something go wrong, follow the [roll back]({{}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you preformed the upgrade. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md index c5e8091bdba..77b7a515e7a 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md @@ -57,7 +57,7 @@ kubectl -n cattle-system get secret cattle-keys-server -o jsonpath --template='{ Remove the Kubernetes objects created by the RKE install. -> **Note:** Removing these Kubernetes components will not affect the Rancher configuration or database, but with any maintenance it is a good idea to create a backup of the data before hand. See [Creating Backups-Kubernetes Install]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups) for details. 
+> **Note:** Removing these Kubernetes components will not affect the Rancher configuration or database, but with any maintenance it is a good idea to create a backup of the data before hand. See [Creating Backups-Kubernetes Install]({{}}/rancher/v2.x/en/backups/backups/ha-backups) for details. ``` kubectl -n cattle-system delete ingress cattle-ingress-http @@ -105,5 +105,5 @@ addons: |- From here follow the standard install steps. -* [3 - Initialize Helm]({{< baseurl >}}/rancher/v2.x/en/installation/options/helm2/helm-init/) -* [4 - Install Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/) +* [3 - Initialize Helm]({{}}/rancher/v2.x/en/installation/options/helm2/helm-init/) +* [4 - Install Rancher]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/) diff --git a/content/rancher/v2.x/en/upgrades/upgrades/namespace-migration/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/namespace-migration/_index.md index 2d85fdad4d6..56855eb7b5e 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/namespace-migration/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/namespace-migration/_index.md @@ -52,11 +52,11 @@ You can prevent cluster networking issues from occurring during your upgrade to >1 Only displays if this feature is enabled for the cluster.
Moving namespaces out of projects
- ![Moving Namespaces]({{< baseurl >}}/img/rancher/move-namespaces.png) + ![Moving Namespaces]({{<baseurl>}}/img/rancher/move-namespaces.png) 1. Repeat these steps for each cluster where you've assigned system namespaces to projects. -**Result:** All system namespaces are moved out of Rancher projects. You can now safely begin the [upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades). +**Result:** All system namespaces are moved out of Rancher projects. You can now safely begin the [upgrade]({{<baseurl>}}/rancher/v2.x/en/upgrades/upgrades). ## Restoring Cluster Networking @@ -171,8 +171,8 @@ Reset the cluster nodes' network policies to restore connectivity.
If you can access Rancher, but one or more of the clusters that you launched using Rancher has no networking, you can repair them by moving the -- From the cluster's [embedded kubectl shell]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-shell). -- By [downloading the cluster kubeconfig file and running it]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-and-a-kubeconfig-file) from your workstation. +- From the cluster's [embedded kubectl shell]({{}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-shell). +- By [downloading the cluster kubeconfig file and running it]({{}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-and-a-kubeconfig-file) from your workstation. ``` for namespace in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get ns -o custom-columns=NAME:.metadata.name --no-headers); do diff --git a/content/rancher/v2.x/en/upgrades/upgrades/single-node/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/single-node/_index.md index 6e859c2f9b0..3b1448cc156 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/single-node/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/single-node/_index.md @@ -28,7 +28,7 @@ In this command, `` is the name of your Rancher containe Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the upgrade. Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{< baseurl >}}/img/rancher/placeholder-ref.png) +![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) | Placeholder | Example | Description | | -------------------------- | -------------------------- | --------------------------------------------------------- | @@ -95,7 +95,7 @@ Pull the image of the Rancher version that you want to upgrade to. 
Placeholder | Description ------------|------------- -`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. ``` docker pull rancher/rancher: @@ -129,7 +129,7 @@ If you have selected to use the Rancher generated self-signed certificate, you a Placeholder | Description ------------|------------- -`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. ``` docker run -d --volumes-from rancher-data \ @@ -152,7 +152,7 @@ Placeholder | Description `` | The path to your full certificate chain. `` | The path to the private key for your certificate. `` | The path to the certificate authority's certificate. -`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. ``` docker run -d --volumes-from rancher-data \ @@ -176,7 +176,7 @@ Placeholder | Description `` | The path to the directory containing your certificate files. `` | The path to your full certificate chain. `` | The path to the private key for your certificate. -`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. 
``` docker run -d --volumes-from rancher-data \ @@ -201,7 +201,7 @@ If you have selected to use [Let's Encrypt](https://letsencrypt.org/) certificat Placeholder | Description ------------|------------- -`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. `` | The domain address that you had originally started with ``` @@ -230,7 +230,7 @@ If you have selected to use the Rancher generated self-signed certificate, you a Placeholder | Description ------------|------------- `` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to to upgrade to. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to to upgrade to. ``` docker run -d --volumes-from rancher-data \ @@ -255,7 +255,7 @@ Placeholder | Description `` | The path to the private key for your certificate. `` | The path to the certificate authority's certificate. `` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. ``` docker run -d --restart=unless-stopped \ @@ -281,7 +281,7 @@ Placeholder | Description `` | The path to your full certificate chain. `` | The path to the private key for your certificate. `` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. 
+`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. > **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. @@ -308,7 +308,7 @@ Log into Rancher. Confirm that the upgrade succeeded by checking the version dis >**Having network issues in your user clusters following upgrade?** > -> See [Restoring Cluster Networking]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). +> See [Restoring Cluster Networking]({{}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). ### F. Clean up Your Old Rancher Server Container @@ -317,4 +317,4 @@ Remove the previous Rancher server container. If you only stop the previous Ranc ## Rolling Back -If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. For more information, see [Docker Rollback]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/). +If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. For more information, see [Docker Rollback]({{}}/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/). diff --git a/content/rancher/v2.x/en/user-settings/_index.md b/content/rancher/v2.x/en/user-settings/_index.md index 4fea8416f2c..c048530c560 100644 --- a/content/rancher/v2.x/en/user-settings/_index.md +++ b/content/rancher/v2.x/en/user-settings/_index.md @@ -7,12 +7,12 @@ aliases: Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. 
-![User Settings Menu]({{< baseurl >}}/img/rancher/user-settings.png) +![User Settings Menu]({{<baseurl>}}/img/rancher/user-settings.png) The available user settings are: -- [API & Keys]({{< baseurl >}}/rancher/v2.x/en/user-settings/api-keys/): If you want to interact with Rancher programmatically, you need an API key. Follow the directions in this section to obtain a key.gferfgre -- [Cloud Credentials]({{< baseurl >}}/rancher/v2.x/en/user-settings/cloud-credentials/): Manage cloud credentials [used by node templates]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to [provision nodes for clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters). Note: Available as of v2.2.0. -- [Node Templates]({{< baseurl >}}/rancher/v2.x/en/user-settings/node-templates): Manage templates [used by Rancher to provision nodes for clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters). -- [Preferences]({{< baseurl >}}/rancher/v2.x/en/user-settings/preferences): Sets superficial preferences for the Rancher UI. +- [API & Keys]({{<baseurl>}}/rancher/v2.x/en/user-settings/api-keys/): If you want to interact with Rancher programmatically, you need an API key. Follow the directions in this section to obtain a key. +- [Cloud Credentials]({{<baseurl>}}/rancher/v2.x/en/user-settings/cloud-credentials/): Manage cloud credentials [used by node templates]({{<baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to [provision nodes for clusters]({{<baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters). Note: Available as of v2.2.0. +- [Node Templates]({{<baseurl>}}/rancher/v2.x/en/user-settings/node-templates): Manage templates [used by Rancher to provision nodes for clusters]({{<baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters). +- [Preferences]({{<baseurl>}}/rancher/v2.x/en/user-settings/preferences): Sets superficial preferences for the Rancher UI. - Log Out: Ends your user session. 
diff --git a/content/rancher/v2.x/en/user-settings/api-keys/_index.md b/content/rancher/v2.x/en/user-settings/api-keys/_index.md index a824b0d58f5..bddabe76c3c 100644 --- a/content/rancher/v2.x/en/user-settings/api-keys/_index.md +++ b/content/rancher/v2.x/en/user-settings/api-keys/_index.md @@ -29,7 +29,7 @@ API Keys are composed of four components: The API key won't be valid after expiration. Shorter expiration periods are more secure. - A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints]({{< baseurl >}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) for more information. + A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) for more information. 4. Click **Create**. @@ -43,7 +43,7 @@ API Keys are composed of four components: - Enter your API key information into the application that will send requests to the Rancher API. - Learn more about the Rancher endpoints and parameters by selecting **View in API** for an object in the Rancher UI. -- API keys are used for API calls and [Rancher CLI]({{< baseurl >}}/rancher/v2.x/en/cli). +- API keys are used for API calls and [Rancher CLI]({{}}/rancher/v2.x/en/cli). 
## Deleting API Keys diff --git a/content/rancher/v2.x/en/user-settings/cloud-credentials/_index.md b/content/rancher/v2.x/en/user-settings/cloud-credentials/_index.md index 57884ad24d5..39a516475c1 100644 --- a/content/rancher/v2.x/en/user-settings/cloud-credentials/_index.md +++ b/content/rancher/v2.x/en/user-settings/cloud-credentials/_index.md @@ -5,7 +5,7 @@ weight: 7011 _Available as of v2.2.0_ -When you create a cluster [hosted by an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. +When you create a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. Node templates can use cloud credentials to access the credential information required to provision nodes in the infrastructure providers. The same cloud credential can be used by multiple node templates. By using a cloud credential, you do not have to re-enter access keys for the same cloud provider. Cloud credentials are stored as Kubernetes secrets. @@ -13,7 +13,7 @@ Cloud credentials are only used by node templates if there are fields marked as You can create cloud credentials in two contexts: -- [During creation of a node template]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for a cluster. 
+- [During creation of a node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for a cluster. - In the **User Settings** All cloud credentials are bound to the user profile of who created it. They **cannot** be shared across users. @@ -23,11 +23,11 @@ All cloud credentials are bound to the user profile of who created it. They **ca 1. From your user settings, select **User Avatar > Cloud Credentials**. 1. Click **Add Cloud Credential**. 1. Enter a name for the cloud credential. -1. Select a **Cloud Credential Type** from the drop down. The values of this dropdown is based on the `active` [node drivers]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) in Rancher. +1. Select a **Cloud Credential Type** from the drop down. The values of this dropdown is based on the `active` [node drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) in Rancher. 1. Based on the selected cloud credential type, enter the required values to authenticate with the infrastructure provider. 1. Click **Create**. -**Result:** The cloud credential is created and can immediately be used to [create node templates]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). +**Result:** The cloud credential is created and can immediately be used to [create node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). ## Updating a Cloud Credential @@ -37,11 +37,11 @@ When access credentials are changed or compromised, updating a cloud credential 1. Choose the cloud credential you want to edit and click the **Vertical Ellipsis (...) > Edit**. 1. Update the credential information and click **Save**. -**Result:** The cloud credential is updated with the new access credentials. 
All existing node templates using this cloud credential will automatically use the updated information whenever [new nodes are added]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). +**Result:** The cloud credential is updated with the new access credentials. All existing node templates using this cloud credential will automatically use the updated information whenever [new nodes are added]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). ## Deleting a Cloud Credential -In order to delete cloud credentials, there must not be any node template associated with it. If you are unable to delete the cloud credential, [delete any node templates]({{< baseurl >}}/rancher/v2.x/en/user-settings/node-templates/#deleting-a-node-template) that are still associated to that cloud credential. +In order to delete cloud credentials, there must not be any node template associated with it. If you are unable to delete the cloud credential, [delete any node templates]({{}}/rancher/v2.x/en/user-settings/node-templates/#deleting-a-node-template) that are still associated to that cloud credential. 1. From your user settings, select **User Avatar > Cloud Credentials**. 1. You can either individually delete a cloud credential or bulk delete. diff --git a/content/rancher/v2.x/en/user-settings/node-templates/_index.md b/content/rancher/v2.x/en/user-settings/node-templates/_index.md index 2ebd89b0bd7..c91fa4ca695 100644 --- a/content/rancher/v2.x/en/user-settings/node-templates/_index.md +++ b/content/rancher/v2.x/en/user-settings/node-templates/_index.md @@ -3,9 +3,9 @@ title: Managing Node Templates weight: 7010 --- -When you provision a cluster [hosted by an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. 
These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts: +When you provision a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts: -- While [provisioning a node pool cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). +- While [provisioning a node pool cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). - At any time, from your [user settings](#creating-a-node-template-from-user-settings). When you create a node template, it is bound to your user profile. Node templates cannot be shared among users. You can delete stale node templates that you no longer user from your user settings. @@ -16,14 +16,14 @@ When you create a node template, it is bound to your user profile. Node template 1. Click **Add Template**. 1. Select one of the cloud providers available. Then follow the instructions on screen to configure the template. -**Result:** The template is configured. You can use the template later when you [provision a node pool cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). +**Result:** The template is configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). ## Updating a Node Template 1. From your user settings, select **User Avatar > Node Templates**. 1. Choose the node template that you want to edit and click the **Vertical Ellipsis (...) > Edit**. 
- > **Note:** As of v2.2.0, the default `active` [node drivers]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use [cloud credentials]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#cloud-credentials). If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. + > **Note:** As of v2.2.0, the default `active` [node drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use [cloud credentials]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#cloud-credentials). If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. 1. Edit the required information and click **Save**. @@ -37,7 +37,7 @@ When creating new node templates from your user settings, you can clone an exist 1. Find the template you want to clone. Then select **Ellipsis > Clone**. 1. Complete the rest of the form. -**Result:** The template is cloned and configured. You can use the template later when you [provision a node pool cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). +**Result:** The template is cloned and configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). 
## Deleting a Node Template diff --git a/content/rancher/v2.x/en/v1.6-migration/_index.md b/content/rancher/v2.x/en/v1.6-migration/_index.md index 8d065e00458..0766c009821 100644 --- a/content/rancher/v2.x/en/v1.6-migration/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/_index.md @@ -13,20 +13,20 @@ This video demonstrates a complete walk through of migration from Rancher v1.6 t ## Migration Plan ->**Want to more about Kubernetes before getting started?** Read our [Kubernetes Introduction]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/kub-intro). +>**Want to know more about Kubernetes before getting started?** Read our [Kubernetes Introduction]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/kub-intro). -- [1. Get Started]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/get-started) +- [1. Get Started]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/get-started) >**Already a Kubernetes user in v1.6?** > > _Get Started_ is the only section you need to review for migration to v2.x. You can skip everything else. -- [2. Migrate Your Services]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/) -- [3. Expose Your Services]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/expose-services/) -- [4. Configure Health Checks]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/monitor-apps) -- [5. Schedule Your Services]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/) -- [6. Service Discovery]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/discover-services/) -- [7. Load Balancing]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/load-balancing/) +- [2. Migrate Your Services]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/) +- [3. Expose Your Services]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/expose-services/) +- [4. Configure Health Checks]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/monitor-apps) +- [5. Schedule Your Services]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/) +- [6. Service Discovery]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/discover-services/) +- [7. 
Load Balancing]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/load-balancing/) ## Migration Example Files @@ -48,4 +48,4 @@ During migration, we'll export these services from Rancher v1.6. The export gen A file for Rancher-specific functionality such as health checks and load balancers. These files cannot be read by Rancher v2.x, so don't worry about their contents—we're discarding them and recreating them using the v2.x UI. -### [Next: Get Started]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/get-started) +### [Next: Get Started]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/get-started) diff --git a/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md b/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md index 90112383200..0df7741ae6b 100644 --- a/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md @@ -9,7 +9,7 @@ This document will also show you how to link the workloads and services that you
Resolve the output.txt Link Directive
-![Resolve Link Directive]({{< baseurl >}}/img/rancher/resolve-links.png) +![Resolve Link Directive]({{}}/img/rancher/resolve-links.png) ## In This Document @@ -27,7 +27,7 @@ This document will also show you how to link the workloads and services that you For Rancher v2.x, we've replaced the Rancher DNS microservice used in v1.6 with native [Kubernetes DNS support](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/), which provides equivalent service discovery for Kubernetes workloads and pods. Former Cattle users can replicate all the service discovery features from Rancher v1.6 in v2.x. There's no loss of functionality. -Kubernetes schedules a DNS pod and service in the cluster, which is similar to the [Rancher v1.6 DNS microservice]({{< baseurl >}}/rancher/v1.6/en/cattle/internal-dns-service/#internal-dns-service-in-cattle-environments). Kubernetes then configures its kubelets to route all DNS lookups to this DNS service, which is skyDNS, a flavor of the default Kube-DNS implementation. +Kubernetes schedules a DNS pod and service in the cluster, which is similar to the [Rancher v1.6 DNS microservice]({{}}/rancher/v1.6/en/cattle/internal-dns-service/#internal-dns-service-in-cattle-environments). Kubernetes then configures its kubelets to route all DNS lookups to this DNS service, which is skyDNS, a flavor of the default Kube-DNS implementation. The following table displays each service discovery feature available in the two Rancher releases. @@ -60,11 +60,11 @@ Pods can also be resolved using the `hostname` and `subdomain` fields if set in When you migrate v1.6 services to v2.x, Rancher does not automatically create a Kubernetes service record for each migrated deployment. Instead, you'll have to link the deployment and service together manually, using any of the methods listed below. 
-In the image below, the `web-deployment.yml` and `web-service.yml` files [created after parsing]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/#migration-example-file-output) our [migration example services]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/#migration-example-files) are linked together. +In the image below, the `web-deployment.yml` and `web-service.yml` files [created after parsing]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/#migration-example-file-output) our [migration example services]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/#migration-example-files) are linked together.
Linked Workload and Kubernetes Service
-![Linked Workload and Kubernetes Service]({{< baseurl >}}/img/rancher/linked-service-workload.png) +![Linked Workload and Kubernetes Service]({{<baseurl>}}/img/rancher/linked-service-workload.png) ### Service Name Alias Creation @@ -76,7 +76,7 @@ Using the v2.x UI, use the context menu to navigate to the `Project` view. Then Click **Add Record** to create new DNS records. Then view the various options supported to link to external services or to create aliases for another workload, DNS record, or set of pods.
Add Service Discovery Record
-![Add Service Discovery Record]({{< baseurl >}}/img/rancher/add-record.png) +![Add Service Discovery Record]({{<baseurl>}}/img/rancher/add-record.png) The following table indicates which alias options are implemented natively by Kubernetes and which options are implemented by Rancher leveraging Kubernetes. @@ -89,4 +89,4 @@ Pointing to another workload | | ✓ Create alias for another DNS record | | ✓ -### [Next: Load Balancing]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/load-balancing/) +### [Next: Load Balancing]({{<baseurl>}}/rancher/v2.x/en/v1.6-migration/load-balancing/) diff --git a/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md b/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md index 35c81ae5e58..5e7207b1630 100644 --- a/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md @@ -9,7 +9,7 @@ Use this document to correct workloads that list `ports` in `output.txt`. You ca
Resolve ports for the web Workload
-![Resolve Ports]({{< baseurl >}}/img/rancher/resolve-ports.png) +![Resolve Ports]({{}}/img/rancher/resolve-ports.png) ## In This Document @@ -38,7 +38,7 @@ A _HostPort_ is a port exposed to the public on a _specific node_ running one or In the following diagram, a user is trying to access an instance of Nginx, which is running within a pod on port 80. However, the Nginx deployment is assigned a HostPort of 9890. The user can connect to this pod by browsing to its host IP address, followed by the HostPort in use (9890 in case). -![HostPort Diagram]({{< baseurl >}}/img/rancher/hostPort.svg) +![HostPort Diagram]({{}}/img/rancher/hostPort.svg) #### HostPort Pros @@ -71,7 +71,7 @@ NodePorts help you circumvent an IP address shortcoming. Although pods can be re In the following diagram, a user is trying to connect to an instance of Nginx running in a Kubernetes cluster managed by Rancher. Although he knows what NodePort Nginx is operating on (30216 in this case), he does not know the IP address of the specific node that the pod is running on. However, with NodePort enabled, he can connect to the pod using the IP address for _any_ node in the cluster. Kubeproxy will forward the request to the correct node and pod. -![NodePort Diagram]({{< baseurl >}}/img/rancher/nodePort.svg) +![NodePort Diagram]({{}}/img/rancher/nodePort.svg) NodePorts are available within your Kubernetes cluster on an internal IP. If you want to expose pods external to the cluster, use NodePorts in conjunction with an external load balancer. Traffic requests from outside your cluster for `:` are directed to the workload. The `` can be the IP address of any node in your Kubernetes cluster. 
@@ -101,4 +101,4 @@ For example, for the `web-deployment.yml` file parsed from v1.6 that we've been {{< img "/img/rancher/set-nodeport.gif" "Set NodePort" >}} -### [Next: Configure Health Checks]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/monitor-apps) +### [Next: Configure Health Checks]({{}}/rancher/v2.x/en/v1.6-migration/monitor-apps) diff --git a/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md b/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md index 4d4f2d9ad40..453f833724e 100644 --- a/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md @@ -22,7 +22,7 @@ The first step in migrating from v1.6 to v2.x is to install the Rancher v2.x Ser New for v2.x, all communication to Rancher Server is encrypted. The procedures below instruct you not only on installation of Rancher, but also creation and installation of these certificates. -Before installing v2.x, provision one host or more to function as your Rancher Server(s). You can find the requirements for these hosts in [Server Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/). +Before installing v2.x, provision one host or more to function as your Rancher Server(s). You can find the requirements for these hosts in [Server Requirements]({{}}/rancher/v2.x/en/installation/requirements/). After provisioning your node(s), install Rancher: @@ -34,19 +34,19 @@ After provisioning your node(s), install Rancher: For production environments where your user base requires constant access to your cluster, we recommend installing Rancher in a high availability Kubernetes installation. This installation procedure provisions a three-node cluster and installs Rancher on each node using a Helm chart. 
- >**Important Difference:** Although you could install Rancher v1.6 in a high-availability Kubernetes configuration using an external database and a Docker command on each node, Rancher v2.x in a Kubernetes install requires an existing Kubernetes cluster. Review [Kubernetes Install]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/) for full requirements. + >**Important Difference:** Although you could install Rancher v1.6 in a high-availability Kubernetes configuration using an external database and a Docker command on each node, Rancher v2.x in a Kubernetes install requires an existing Kubernetes cluster. Review [Kubernetes Install]({{}}/rancher/v2.x/en/installation/k8s-install/) for full requirements. ## B. Configure Authentication -After your Rancher v2.x Server is installed, we recommend configuring external authentication (like Active Directory or GitHub) so that users can log into Rancher using their single sign-on. For a full list of supported authentication providers and instructions on how to configure them, see [Authentication]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication). +After your Rancher v2.x Server is installed, we recommend configuring external authentication (like Active Directory or GitHub) so that users can log into Rancher using their single sign-on. For a full list of supported authentication providers and instructions on how to configure them, see [Authentication]({{}}/rancher/v2.x/en/admin-settings/authentication).
Rancher v2.x Authentication
-![Rancher v2.x Authentication]({{< baseurl >}}/img/rancher/auth-providers.svg) +![Rancher v2.x Authentication]({{}}/img/rancher/auth-providers.svg) ### Local Users -Although we recommend using an external authentication provider, Rancher v1.6 and v2.x both offer support for users local to Rancher. However, these users cannot be migrated from Rancher v1.6 to v2.x. If you used local users in Rancher v1.6 and want to continue this practice in v2.x, you'll need to [manually recreate these user accounts]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/) and assign them access rights. +Although we recommend using an external authentication provider, Rancher v1.6 and v2.x both offer support for users local to Rancher. However, these users cannot be migrated from Rancher v1.6 to v2.x. If you used local users in Rancher v1.6 and want to continue this practice in v2.x, you'll need to [manually recreate these user accounts]({{}}/rancher/v2.x/en/admin-settings/authentication/) and assign them access rights. As a best practice, you should use a hybrid of external _and_ local authentication. This practice provides access to Rancher should your external authentication experience an interruption, as you can still log in using a local user account. Set up a few local accounts as administrative users of Rancher. @@ -61,7 +61,7 @@ Begin work in Rancher v2.x by using it to provision a new Kubernetes cluster, wh A cluster and project in combined together in Rancher v2.x is equivalent to a v1.6 environment. A _cluster_ is the compute boundary (i.e., your hosts) and a _project_ is an administrative boundary (i.e., a grouping of namespaces used to assign access rights to users). -There's more basic info on provisioning clusters in the headings below, but for full information, see [Provisioning Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/). 
+There's more basic info on provisioning clusters in the headings below, but for full information, see [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). ### Clusters @@ -69,32 +69,32 @@ In Rancher v1.6, compute nodes were added to an _environment_. Rancher v2.x esch Rancher v2.x lets you launch a Kubernetes cluster anywhere. Host your cluster using: -- A [hosted Kubernetes provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/). -- A [pool of nodes from an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). Rancher launches Kubernetes on the nodes. -- Any [custom node(s)]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/). Rancher can launch Kubernetes on the nodes, be they bare metal servers, virtual machines, or cloud hosts on a less popular infrastructure provider. +- A [hosted Kubernetes provider]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/). +- A [pool of nodes from an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). Rancher launches Kubernetes on the nodes. +- Any [custom node(s)]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/). Rancher can launch Kubernetes on the nodes, be they bare metal servers, virtual machines, or cloud hosts on a less popular infrastructure provider. ### Projects -Additionally, Rancher v2.x introduces [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/), which are objects that divide clusters into different application groups that are useful for applying user permissions. This model of clusters and projects allow for multi-tenancy because hosts are owned by the cluster, and the cluster can be further divided into multiple projects where users can manage their apps, but not those of others. 
+Additionally, Rancher v2.x introduces [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/), which are objects that divide clusters into different application groups that are useful for applying user permissions. This model of clusters and projects allow for multi-tenancy because hosts are owned by the cluster, and the cluster can be further divided into multiple projects where users can manage their apps, but not those of others. When you create a cluster, two projects are automatically created: - The `System` project, which includes system namespaces where important Kubernetes resources are running (like ingress controllers and cluster dns services) - The `Default` project. -However, for production environments, we recommend [creating your own project]({{< baseurl >}}/rancher/v2.x/en/project-admin/namespaces/#creating-projects) and giving it a descriptive name. +However, for production environments, we recommend [creating your own project]({{}}/rancher/v2.x/en/project-admin/namespaces/#creating-projects) and giving it a descriptive name. -After provisioning a new cluster and project, you can authorize your users to access and use project resources. Similarly to Rancher v1.6 environments, Rancher v2.x allows you to [assign users to projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/editing-projects/). By assigning users to projects, you can limit what applications and resources a user can access. +After provisioning a new cluster and project, you can authorize your users to access and use project resources. Similarly to Rancher v1.6 environments, Rancher v2.x allows you to [assign users to projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/editing-projects/). By assigning users to projects, you can limit what applications and resources a user can access. ## D. Create Stacks -In Rancher v1.6, _stacks_ were used to group together the services that belong to your application. 
In v2.x, you need to [create namespaces]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#creating-namespaces), which are the v2.x equivalent of stacks, for the same purpose. +In Rancher v1.6, _stacks_ were used to group together the services that belong to your application. In v2.x, you need to [create namespaces]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#creating-namespaces), which are the v2.x equivalent of stacks, for the same purpose. In Rancher v2.x, namespaces are child objects to projects. When you create a project, a `default` namespace is added to the project, but you can create your own to parallel your stacks from v1.6. During migration, if you don't explicitly define which namespace a service should be deployed to, it's deployed to the `default` namespace. -Just like v1.6, Rancher v2.x supports service discovery within and across namespaces (we'll get to [service discovery]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/discover-services) soon). +Just like v1.6, Rancher v2.x supports service discovery within and across namespaces (we'll get to [service discovery]({{}}/rancher/v2.x/en/v1.6-migration/discover-services) soon). -### [Next: Migrate Your Services]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/run-migration-tool) +### [Next: Migrate Your Services]({{}}/rancher/v2.x/en/v1.6-migration/run-migration-tool) diff --git a/content/rancher/v2.x/en/v1.6-migration/kub-intro/_index.md b/content/rancher/v2.x/en/v1.6-migration/kub-intro/_index.md index e3b188466f7..a29115d4d13 100644 --- a/content/rancher/v2.x/en/v1.6-migration/kub-intro/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/kub-intro/_index.md @@ -36,4 +36,4 @@ Because Rancher v1.6 defaulted to our Cattle container orchestrator, it primaril More detailed information on Kubernetes concepts can be found in the [Kubernetes Concepts Documentation](https://kubernetes.io/docs/concepts/). 
-### [Next: Get Started]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/get-started/) +### [Next: Get Started]({{}}/rancher/v2.x/en/v1.6-migration/get-started/) diff --git a/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md b/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md index 6885d6794a1..183eef1bee3 100644 --- a/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md @@ -5,13 +5,13 @@ weight: 700 If your applications are public-facing and consume significant traffic, you should place a load balancer in front of your cluster so that users can always access their apps without service interruption. Typically, you can fulfill a high volume of service requests by [horizontally scaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) your deployment, which spins up additional application containers as traffic ramps up. However, this technique requires routing that distributes traffic across your nodes efficiently. In cases where you need to accommodate public traffic that scales up and down, you'll need a load balancer. -As outlined in [its documentation]({{< baseurl >}}/rancher/v1.6/en/cattle/adding-load-balancers/), Rancher v1.6 provided rich support for load balancing using its own microservice powered by HAProxy, which supports HTTP, HTTPS, TCP hostname, and path-based routing. Most of these same features are available in v2.x. However, load balancers that you used with v1.6 cannot be migrated to v2.x. You'll have to manually recreate your v1.6 load balancer in v2.x. +As outlined in [its documentation]({{}}/rancher/v1.6/en/cattle/adding-load-balancers/), Rancher v1.6 provided rich support for load balancing using its own microservice powered by HAProxy, which supports HTTP, HTTPS, TCP hostname, and path-based routing. Most of these same features are available in v2.x. 
However, load balancers that you used with v1.6 cannot be migrated to v2.x. You'll have to manually recreate your v1.6 load balancer in v2.x. If you encounter the `output.txt` text below after parsing your v1.6 Compose files to Kubernetes manifests, you'll have to resolve it by manually creating a load balancer in v2.x.
output.txt Load Balancer Directive
-![Resolve Load Balancer Directive]({{< baseurl >}}/img/rancher/resolve-load-balancer.png) +![Resolve Load Balancer Directive]({{}}/img/rancher/resolve-load-balancer.png) ## In This Document @@ -35,7 +35,7 @@ By default, Rancher v2.x replaces the v1.6 load balancer microservice with the n ## Load Balancer Deployment -In Rancher v1.6, you could add port/service rules for configuring your HAProxy to load balance for target services. You could also configure the hostname/path-based routing rules. +In Rancher v1.6, you could add port/service rules for configuring your HAProxy to load balance for target services. You could also configure the hostname/path-based routing rules. Rancher v2.x offers similar functionality, but load balancing is instead handled by Ingress. An Ingress is a specification of rules that a controller component applies to your load balancer. The actual load balancer can run outside of your cluster or within it. @@ -43,7 +43,7 @@ By default, Rancher v2.x deploys NGINX Ingress Controller on clusters provisione RKE deploys NGINX Ingress Controller as a [Kubernetes DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), meaning that an NGINX instance is deployed on every node in the cluster. NGINX acts like an Ingress Controller listening to Ingress creation within your entire cluster, and it also configures itself as the load balancer to satisfy the Ingress rules. The DaemonSet is configured with hostNetwork to expose two ports: 80 and 443. -For more information NGINX Ingress Controller, their deployment as DaemonSets, deployment configuration options, see the [RKE documentation]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/). +For more information on NGINX Ingress Controller, its deployment as a DaemonSet, and deployment configuration options, see the [RKE documentation]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/). 
## Load Balancing Architecture @@ -55,13 +55,13 @@ In Rancher v1.6 you could deploy a scalable load balancer service within your st
Rancher v1.6 Load Balancing Architecture
-![Rancher v1.6 Load Balancing]({{< baseurl >}}/img/rancher/cattle-load-balancer.svg) +![Rancher v1.6 Load Balancing]({{}}/img/rancher/cattle-load-balancer.svg) The Rancher v2.x Ingress Controller is a DaemonSet, it is globally deployed on all schedulable nodes to serve your entire Kubernetes Cluster. Therefore, when you program the Ingress rules, you must use a unique hostname and path to point to your workloads, as the load balancer node IP addresses and ports 80 and 443 are common access points for all workloads.
Rancher v2.x Load Balancing Architecture
-![Rancher v2.x Load Balancing]({{< baseurl >}}/img/rancher/kubernetes-load-balancer.svg) +![Rancher v2.x Load Balancing]({{}}/img/rancher/kubernetes-load-balancer.svg) ## Ingress Caveats @@ -79,13 +79,13 @@ You can launch a new load balancer to replace your load balancer from v1.6. Usin >**Prerequisite:** Before deploying Ingress, you must have a workload deployed that's running a scale of two or more pods. > -![Workload Scale]({{< baseurl >}}/img/rancher/workload-scale.png) +![Workload Scale]({{}}/img/rancher/workload-scale.png) For balancing between these two pods, you must create a Kubernetes Ingress rule. To create this rule, navigate to your cluster and project, and click **Resources > Workloads > Load Balancing.** (In versions prior to v2.3.0, click **Workloads > Load Balancing.**) Then click **Add Ingress**. This GIF below depicts how to add Ingress to one of your projects.
Browsing to Load Balancer Tab and Adding Ingress
-![Adding Ingress]({{< baseurl >}}/img/rancher/add-ingress.gif) +![Adding Ingress]({{}}/img/rancher/add-ingress.gif) Similar to a service/port rules in Rancher v1.6, here you can specify rules targeting your workload's container port. The sections below demonstrate how to create Ingress rules. @@ -97,13 +97,13 @@ For example, let's say you have multiple workloads deployed to a single namespac
Ingress: Path-Based Routing Configuration
-![Ingress: Path-Based Routing Configuration]({{< baseurl >}}/img/rancher/add-ingress-form.png) +![Ingress: Path-Based Routing Configuration]({{}}/img/rancher/add-ingress-form.png) Rancher v2.x also places a convenient link to the workloads on the Ingress record. If you configure an external DNS to program the DNS records, this hostname can be mapped to the Kubernetes Ingress address.
Workload Links
-![Load Balancer Links to Workloads]({{< baseurl >}}/img/rancher/load-balancer-links.png) +![Load Balancer Links to Workloads]({{}}/img/rancher/load-balancer-links.png) The Ingress address is the IP address in your cluster that the Ingress Controller allocates for your workload. You can reach your workload by browsing to this IP address. Use `kubectl` command below to see the Ingress address assigned by the controller: @@ -115,24 +115,24 @@ kubectl get ingress Rancher v2.x Ingress functionality supports the HTTPS protocol, but if you want to use it, you need to use a valid SSL/TLS certificate. While configuring Ingress rules, use the **SSL/TLS Certificates** section to configure a certificate. -- We recommend [uploading a certificate]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/certificates/) from a known certificate authority (you'll have to do this before configuring Ingress). Then, while configuring your load balancer, use the **Choose a certificate** option and select the uploaded certificate that you want to use. -- If you have configured [NGINX default certificate]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/#configuring-an-nginx-default-certificate), you can select **Use default ingress controller certificate**. +- We recommend [uploading a certificate]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/) from a known certificate authority (you'll have to do this before configuring Ingress). Then, while configuring your load balancer, use the **Choose a certificate** option and select the uploaded certificate that you want to use. +- If you have configured [NGINX default certificate]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/#configuring-an-nginx-default-certificate), you can select **Use default ingress controller certificate**.
Load Balancer Configuration: SSL/TLS Certificate Section
-![SSL/TLS Certificates Section]({{< baseurl >}}/img/rancher/load-balancer-ssl-certs.png) +![SSL/TLS Certificates Section]({{}}/img/rancher/load-balancer-ssl-certs.png) ### TCP Load Balancing Options #### Layer-4 Load Balancer -For the TCP protocol, Rancher v2.x supports configuring a Layer 4 load balancer using the cloud provider in which your Kubernetes cluster is deployed. Once this load balancer appliance is configured for your cluster, when you choose the option of a `Layer-4 Load Balancer` for port-mapping during workload deployment, Rancher automatically creates a corresponding load balancer service. This service will call the corresponding cloud provider and configure the load balancer appliance to route requests to the appropriate pods. See [Cloud Providers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) for information on how to configure LoadBalancer services for your cloud provider. +For the TCP protocol, Rancher v2.x supports configuring a Layer 4 load balancer using the cloud provider in which your Kubernetes cluster is deployed. Once this load balancer appliance is configured for your cluster, when you choose the option of a `Layer-4 Load Balancer` for port-mapping during workload deployment, Rancher automatically creates a corresponding load balancer service. This service will call the corresponding cloud provider and configure the load balancer appliance to route requests to the appropriate pods. See [Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) for information on how to configure LoadBalancer services for your cloud provider. For example, if we create a deployment named `myapp` and specify a Layer 4 load balancer in the **Port Mapping** section, Rancher will automatically add an entry to the **Load Balancer** tab named `myapp-loadbalancer`.
Workload Deployment: Layer 4 Load Balancer Creation
-![Deploy Layer-4 Load Balancer]({{< baseurl >}}/img/rancher/deploy-workload-load-balancer.png) +![Deploy Layer-4 Load Balancer]({{}}/img/rancher/deploy-workload-load-balancer.png) Once configuration of the load balancer succeeds, the Rancher UI provides a link to your workload's public endpoint. @@ -144,13 +144,13 @@ However, there is a workaround to use NGINX's TCP balancing by creating a Kubern To configure NGINX to expose your services via TCP, you can add the ConfigMap `tcp-services` that should exist in the `ingress-nginx` namespace. This namespace also contains the NGINX Ingress Controller pods. -![Layer-4 Load Balancer: ConfigMap Workaround]({{< baseurl >}}/img/rancher/layer-4-lb-config-map.png) +![Layer-4 Load Balancer: ConfigMap Workaround]({{}}/img/rancher/layer-4-lb-config-map.png) The key in the ConfigMap entry should be the TCP port that you want to expose for public access: `:`. As shown above, two workloads are listed in the `Default` namespace. For example, the first entry in the ConfigMap above instructs NGINX to expose the `myapp` workload (the one in the `default` namespace that's listening on private port 80) over external port `6790`. Adding these entries to the ConfigMap automatically updates the NGINX pods to configure these workloads for TCP balancing. The workloads exposed should be available at `:`. If they are not accessible, you might have to expose the TCP port explicitly using a NodePort service. ## Rancher v2.x Load Balancing Limitations -Cattle provided feature-rich load balancer support that is [well documented]({{< baseurl >}}/rancher/v1.6/en/cattle/adding-load-balancers/#load-balancers). Some of these features do not have equivalents in Rancher v2.x. This is the list of such features: +Cattle provided feature-rich load balancer support that is [well documented]({{}}/rancher/v1.6/en/cattle/adding-load-balancers/#load-balancers). Some of these features do not have equivalents in Rancher v2.x. 
This is the list of such features: - No support for SNI in current NGINX Ingress Controller. - TCP load balancing requires a load balancer appliance enabled by cloud provider within the cluster. There is no Ingress support for TCP on Kubernetes. diff --git a/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md b/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md index c9ea17668c4..b1a2f1cc110 100644 --- a/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md @@ -13,7 +13,7 @@ For example, for the image below, we would configure liveness probes for the `we
Resolve health_check for the web and webLB Workloads
-![Resolve health_check]({{< baseurl >}}/img/rancher/resolve-health-checks.png) +![Resolve health_check]({{}}/img/rancher/resolve-health-checks.png) ## In This Document @@ -33,7 +33,7 @@ The health check microservice features two types of health checks, which have a - **TCP health checks**: - These health checks check if a TCP connection opens at the specified port for the monitored service. For full details, see the [Rancher v1.6 documentation]({{< baseurl >}}/rancher/v1.6/en/cattle/health-checks/). + These health checks check if a TCP connection opens at the specified port for the monitored service. For full details, see the [Rancher v1.6 documentation]({{}}/rancher/v1.6/en/cattle/health-checks/). - **HTTP health checks**: @@ -73,7 +73,7 @@ The following diagram displays kubelets running probes on containers they are mo ## Configuring Probes in Rancher v2.x -The [migration-tool CLI]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/) cannot parse health checks from Compose files to Kubernetes manifest. Therefore, if want you to add health checks to your Rancher v2.x workloads, you'll have to add them manually. +The [migration-tool CLI]({{}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/) cannot parse health checks from Compose files to Kubernetes manifests. Therefore, if you want to add health checks to your Rancher v2.x workloads, you'll have to add them manually. Using the Rancher v2.x UI, you can add TCP or HTTP health checks to Kubernetes workloads. By default, Rancher asks you to configure a readiness check for your workloads and applies a liveness check using the same configuration. Optionally, you can define a separate liveness check. @@ -83,7 +83,7 @@ Configure probes by using the **Health Check** section while editing deployments
Edit Deployment: Health Check Section
-![Health Check Section]({{< baseurl >}}/img/rancher/health-check-section.png) +![Health Check Section]({{}}/img/rancher/health-check-section.png) ### Configuring Checks @@ -95,7 +95,7 @@ While you create a workload using Rancher v2.x, we recommend configuring a check TCP checks monitor your deployment's health by attempting to open a connection to the pod over a specified port. If the probe can open the port, it's considered healthy. Failure to open it is considered unhealthy, which notifies Kubernetes that it should kill the pod and then replace it according to its [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). (this applies to Liveness probes, for Readiness probes, it will mark the pod as Unready). -You can configure the probe along with values for specifying its behavior by selecting the **TCP connection opens successfully** option in the **Health Check** section. For more information, see [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). +You can configure the probe along with values for specifying its behavior by selecting the **TCP connection opens successfully** option in the **Health Check** section. For more information, see [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). ![TCP Check]({{}}/img/rancher/readiness-check-tcp.png) @@ -133,7 +133,7 @@ When you configure a readiness check using Rancher v2.x, the `readinessProbe` di HTTP checks monitor your deployment's health by sending an HTTP GET request to a specific URL path that you define. If the pod responds with a message range of `200`-`400`, the health check is considered successful. 
If the pod replies with any other value, the check is considered unsuccessful, so Kubernetes kills and replaces the pod according to its [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). (this applies to Liveness probes, for Readiness probes, it will mark the pod as Unready). -You can configure the probe along with values for specifying its behavior by selecting the **HTTP returns successful status** or **HTTPS returns successful status**. For more information, see [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#healthcheck-parameter-mappings). +You can configure the probe along with values for specifying its behavior by selecting the **HTTP returns successful status** or **HTTPS returns successful status**. For more information, see [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#healthcheck-parameter-mappings). ![HTTP Check]({{}}/img/rancher/readiness-check-http.png) @@ -153,7 +153,7 @@ While configuring a readiness check for either the TCP or HTTP protocol, you can Rancher v2.x, like v1.6, lets you perform health checks using the TCP and HTTP protocols. However, Rancher v2.x also lets you check the health of a pod by running a command inside of it. If the container exits with a code of `0` after running the command, the pod is considered healthy. -You can configure a liveness or readiness check that executes a command that you specify by selecting the `Command run inside the container exits with status 0` option from **Health Checks** while [deploying a workload]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). 
+You can configure a liveness or readiness check that executes a command that you specify by selecting the `Command run inside the container exits with status 0` option from **Health Checks** while [deploying a workload]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). ![Healthcheck Execute Command]({{}}/img/rancher/healthcheck-cmd-exec.png) @@ -171,4 +171,4 @@ Rancher v1.6 Compose Parameter | Rancher v2.x Kubernetes Parameter `initializing_timeout` | `initialDelaySeconds` `strategy` | `restartPolicy` -### [Next: Schedule Your Services]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/) +### [Next: Schedule Your Services]({{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/) diff --git a/content/rancher/v2.x/en/v1.6-migration/run-migration-tool/_index.md b/content/rancher/v2.x/en/v1.6-migration/run-migration-tool/_index.md index f1d02645957..ebdebd5b9bd 100644 --- a/content/rancher/v2.x/en/v1.6-migration/run-migration-tool/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/run-migration-tool/_index.md @@ -50,7 +50,7 @@ After you download migration-tools CLI, rename it and make it executable. Next, use the migration-tools CLI to export all stacks in all of the Cattle environments into Compose files. Then, for stacks that you want to migrate to Rancher v2.x, convert the Compose files into Kubernetes manifest. ->**Prerequisite:** Create an [Account API Key]({{< baseurl >}}/rancher/v1.6/en/api/v2-beta/api-keys/#account-api-keys) to authenticate with Rancher v1.6 when using the migration-tools CLI. +>**Prerequisite:** Create an [Account API Key]({{}}/rancher/v1.6/en/api/v2-beta/api-keys/#account-api-keys) to authenticate with Rancher v1.6 when using the migration-tools CLI. 1. Export the Docker Compose files for your Cattle environments and stacks from Rancher v1.6. 
@@ -62,7 +62,7 @@ Next, use the migration-tools CLI to export all stacks in all of the Cattle envi **Step Result:** migration-tools exports Compose files (`docker-compose.yml` and `rancher-compose.yml`) for each stack in the `--export-dir` directory. If you omitted this option, Compose files are output to your current directory. - A unique directory is created for each environment and stack. For example, if we export each [environment/stack]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/#migration-example-files) from Rancher v1.6, the following directory structure is created: + A unique directory is created for each environment and stack. For example, if we export each [environment/stack]({{}}/rancher/v2.x/en/v1.6-migration/#migration-example-files) from Rancher v1.6, the following directory structure is created: ``` export/ # migration-tools --export-dir @@ -85,7 +85,7 @@ Next, use the migration-tools CLI to export all stacks in all of the Cattle envi >**Note:** If you omit the `--docker-file` and `--rancher-file` options from your command, migration-tools uses the current working directory to find Compose files. ->**Want full usage and options for the migration-tools CLI?** See the [Migration Tools CLI Reference]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/migration-tools-ref/). +>**Want full usage and options for the migration-tools CLI?** See the [Migration Tools CLI Reference]({{}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/migration-tools-ref/). 
### migration-tools CLI Output @@ -104,7 +104,7 @@ When a you export a service from Rancher v1.6 that exposes public ports, migrati #### Migration Example File Output -If we parse the two example files from [Migration Example Files]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/#migration-example-files), `docker-compose.yml` and `rancher-compose.yml`, the following files are output: +If we parse the two example files from [Migration Example Files]({{}}/rancher/v2.x/en/v1.6-migration/#migration-example-files), `docker-compose.yml` and `rancher-compose.yml`, the following files are output: File | Description -----|------------ @@ -244,13 +244,13 @@ You can deploy the Kubernetes manifests created by migration-tools by importing
Deploy Services: Import Kubernetes Manifest
-![Deploy Services]({{< baseurl >}}/img/rancher/deploy-service.gif) +![Deploy Services]({{}}/img/rancher/deploy-service.gif) {{% /tab %}} {{% tab "Rancher CLI" %}} ->**Prerequisite:** [Install Rancher CLI]({{< baseurl >}}/rancher/v2.x/en/cli/) for Rancher v2.x. +>**Prerequisite:** [Install Rancher CLI]({{}}/rancher/v2.x/en/cli/) for Rancher v2.x. Use the following Rancher CLI commands to deploy your application using Rancher v2.x. For each Kubernetes manifest output by migration-tools CLI, enter one of the commands below to import it into Rancher v2.x. @@ -267,7 +267,7 @@ Following importation, you can view your v1.6 services in the v2.x UI as Kuberne
Imported Services
-![Imported Services]({{< baseurl >}}/img/rancher/imported-workloads.png) +![Imported Services]({{}}/img/rancher/imported-workloads.png) ## What Now? @@ -275,15 +275,15 @@ Although the migration-tool CLI parses your Rancher v1.6 Compose files to Kubern
Edit Migrated Services
-![Edit Migrated Workload]({{< baseurl >}}/img/rancher/edit-migration-workload.gif) +![Edit Migrated Workload]({{}}/img/rancher/edit-migration-workload.gif) As mentioned in [Migration Tools CLI Output](#migration-tools-cli-output), the `output.txt` files generated during parsing lists the manual steps you must make for each deployment. Review the upcoming topics for more information on manually editing your Kubernetes specs. -Open your `output.txt` file and take a look at its contents. When you parsed your Compose files into Kubernetes manifests, migration-tools CLI output a manifest for each workload that it creates for Kubernetes. For example, our when our [Migration Example Files]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/#migration-example-files) are parsed into Kubernetes manifests, `output.txt` lists each resultant parsed [Kubernetes manifest file](#migration-example-file-output) (i.e., workloads). Each workload features a list of action items to restore operations for the workload in v2.x. +Open your `output.txt` file and take a look at its contents. When you parsed your Compose files into Kubernetes manifests, migration-tools CLI output a manifest for each workload that it creates for Kubernetes. For example, when our [Migration Example Files]({{}}/rancher/v2.x/en/v1.6-migration/#migration-example-files) are parsed into Kubernetes manifests, `output.txt` lists each resultant parsed [Kubernetes manifest file](#migration-example-file-output) (i.e., workloads). Each workload features a list of action items to restore operations for the workload in v2.x.
Output.txt Example
-![output.txt]({{< baseurl >}}/img/rancher/output-dot-text.png) +![output.txt]({{}}/img/rancher/output-dot-text.png) The following table lists possible directives that may appear in `output.txt`, what they mean, and links on how to resolve them. @@ -296,16 +296,16 @@ Directive | Instructions [scale][5] | In v1.6, scale refers to the number of container replicas running on a single node. In v2.x, this feature is replaced by replica sets. start_on_create | No Kubernetes equivalent. No action is required from you. -[1]:{{< baseurl >}}/rancher/v2.x/en/v1.6-migration/monitor-apps/#configuring-probes-in-rancher-v2-x -[2]:{{< baseurl >}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-using-labels -[3]:{{< baseurl >}}/rancher/v2.x/en/v1.6-migration/discover-services -[4]:{{< baseurl >}}/rancher/v2.x/en/v1.6-migration/expose-services -[5]:{{< baseurl >}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-pods-to-a-specific-node +[1]:{{}}/rancher/v2.x/en/v1.6-migration/monitor-apps/#configuring-probes-in-rancher-v2-x +[2]:{{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-using-labels +[3]:{{}}/rancher/v2.x/en/v1.6-migration/discover-services +[4]:{{}}/rancher/v2.x/en/v1.6-migration/expose-services +[5]:{{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-pods-to-a-specific-node -[7]:{{< baseurl >}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-using-labels -[8]:{{< baseurl >}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-global-services -[9]:{{< baseurl >}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#label-affinity-antiaffinity +[7]:{{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-using-labels +[8]:{{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-global-services +[9]:{{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#label-affinity-antiaffinity -### [Next: Expose Your Services]({{< baseurl 
>}}/rancher/v2.x/en/v1.6-migration/expose-services/) +### [Next: Expose Your Services]({{}}/rancher/v2.x/en/v1.6-migration/expose-services/) diff --git a/content/rancher/v2.x/en/v1.6-migration/schedule-workloads/_index.md b/content/rancher/v2.x/en/v1.6-migration/schedule-workloads/_index.md index 5d070f1638f..e78fa280b0c 100644 --- a/content/rancher/v2.x/en/v1.6-migration/schedule-workloads/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/schedule-workloads/_index.md @@ -13,7 +13,7 @@ You can schedule your migrated v1.6 services while editing a deployment. Schedul
Editing Workloads: Workload Type and Node Scheduling Sections
-![Workload Type and Node Scheduling Sections]({{< baseurl >}}/img/rancher/migrate-schedule-workloads.png) +![Workload Type and Node Scheduling Sections]({{}}/img/rancher/migrate-schedule-workloads.png) ## In This Document @@ -39,7 +39,7 @@ Rancher v2.x retains _all_ methods available in v1.6 for scheduling your service In v1.6, you would schedule a service to a host while adding a service to a Stack. In Rancher v2.x., the equivalent action is to schedule a workload for deployment. The following composite image shows a comparison of the UI used for scheduling in Rancher v2.x versus v1.6. -![Node Scheduling: Rancher v2.x vs v1.6]({{< baseurl >}}/img/rancher/node-scheduling.png) +![Node Scheduling: Rancher v2.x vs v1.6]({{}}/img/rancher/node-scheduling.png) ## Node Scheduling Options @@ -47,7 +47,7 @@ Rancher offers a variety of options when scheduling nodes to host workload pods You can choose a scheduling option as you deploy a workload. The term _workload_ is synonymous with adding a service to a Stack in Rancher v1.6). You can deploy a workload by using the context menu to browse to a cluster project (` > > Workloads`). -The sections that follow provide information on using each scheduling options, as well as any notable changes from Rancher v1.6. For full instructions on deploying a workload in Rancher v2.x beyond just scheduling options, see [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). +The sections that follow provide information on using each scheduling options, as well as any notable changes from Rancher v1.6. For full instructions on deploying a workload in Rancher v2.x beyond just scheduling options, see [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). Option | v1.6 Feature | v2.x Feature -------|------|------ @@ -64,13 +64,13 @@ Option | v1.6 Feature | v2.x Feature In v1.6, you could control the number of container replicas deployed for a service. 
You can schedule pods the same way in v2.x, but you'll have to set the scale manually while editing a workload. -![Resolve Scale]({{< baseurl >}}/img/rancher/resolve-scale.png) +![Resolve Scale]({{}}/img/rancher/resolve-scale.png) During migration, you can resolve `scale` entries in `output.txt` by setting a value for the **Workload Type** option **Scalable deployment** depicted below.
Scalable Deployment Option
-![Workload Scale]({{< baseurl >}}/img/rancher/workload-type-option.png) +![Workload Scale]({{}}/img/rancher/workload-type-option.png) ### Scheduling Pods to a Specific Node @@ -81,7 +81,7 @@ As you deploy a workload, use the **Node Scheduling** section to choose a node t
Rancher v2.x: Workload Deployment
-![Workload Tab and Group by Node Icon]({{< baseurl >}}/img/rancher/schedule-specific-node.png) +![Workload Tab and Group by Node Icon]({{}}/img/rancher/schedule-specific-node.png) Rancher schedules pods to the node you select if 1) there are compute resource available for the node and 2) you've configured port mapping to use the HostPort option, that there are no port conflicts. @@ -89,7 +89,7 @@ If you expose the workload using a NodePort that conflicts with another workload After the workload is created, you can confirm that the pods are scheduled to your chosen node. From the project view, click **Resources > Workloads.** (In versions prior to v2.3.0, click the **Workloads** tab.) Click the **Group by Node** icon to sort your workloads by node. Note that both Nginx pods are scheduled to the same node. -![Pods Scheduled to Same Node]({{< baseurl >}}/img/rancher/scheduled-nodes.png) +![Pods Scheduled to Same Node]({{}}/img/rancher/scheduled-nodes.png) ). A _DaemonSet_ functions exactly like a Rancher v1.6 global service. The Kubernetes scheduler deploys a pod on each node of the cluster, and as new nodes are added, the scheduler will start new pods on them provided they match the scheduling requirements of the workload. Additionally, in v2.x, you can also limit a DaemonSet to be deployed to nodes that have a specific label. @@ -217,7 +217,7 @@ To create a daemonset while configuring a workload, choose **Run one pod on each
Workload Configuration: Choose run one pod on each node to configure daemonset
-![choose Run one pod on each node]({{< baseurl >}}/img/rancher/workload-type.png) +![choose Run one pod on each node]({{}}/img/rancher/workload-type.png) ### Scheduling Pods Using Resource Constraints @@ -240,8 +240,8 @@ To declare resource constraints, edit your migrated workloads, editing the **Sec
Scheduling: Resource Constraint Settings
-![Resource Constraint Settings]({{< baseurl >}}/img/rancher/resource-constraint-settings.png) +![Resource Constraint Settings]({{}}/img/rancher/resource-constraint-settings.png) You can find more detail about these specs and how to use them in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). -### [Next: Service Discovery]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/discover-services/) +### [Next: Service Discovery]({{}}/rancher/v2.x/en/v1.6-migration/discover-services/) diff --git a/content/rke/latest/en/cert-mgmt/_index.md b/content/rke/latest/en/cert-mgmt/_index.md index 5d202d6bbdc..21f9f53011e 100644 --- a/content/rke/latest/en/cert-mgmt/_index.md +++ b/content/rke/latest/en/cert-mgmt/_index.md @@ -12,9 +12,9 @@ Certificates are an important part of Kubernetes clusters and are used for all K ## Generating Certificate Signing Requests (CSRs) and Keys -If you want to create and sign the certificates by a real Certificate Authority (CA), you can use RKE to [generate a set of Certificate Signing Requests (CSRs) and keys]({{< baseurl >}}/rke/latest/en/installation/certs/#generating-certificate-signing-requests-csrs-and-keys). +If you want to create and sign the certificates by a real Certificate Authority (CA), you can use RKE to [generate a set of Certificate Signing Requests (CSRs) and keys]({{}}/rke/latest/en/installation/certs/#generating-certificate-signing-requests-csrs-and-keys). -You can use the CSRs and keys to sign the certificates by a real CA. After the certificates are signed, these custom certificates can be used by RKE to as [custom certificates]({{< baseurl >}}/rke/latest/en/installation/certs/) for the Kubernetes cluster. +You can use the CSRs and keys to sign the certificates by a real CA. 
After the certificates are signed, these custom certificates can be used by RKE as [custom certificates]({{<baseurl>}}/rke/latest/en/installation/certs/) for the Kubernetes cluster. ## Certificate Rotation diff --git a/content/rke/latest/en/config-options/_index.md b/content/rke/latest/en/config-options/_index.md index ecf29f2a412..abbf6e2209a 100644 --- a/content/rke/latest/en/config-options/_index.md +++ b/content/rke/latest/en/config-options/_index.md @@ -6,35 +6,35 @@ weight: 200 When setting up your `cluster.yml` for RKE, there are a lot of different options that can be configured to control the behavior of how RKE launches Kubernetes. -There are several options that can be configured in cluster configuration option. There are several [example yamls]({{< baseurl >}}/rke/latest/en/example-yamls/) that contain all the options. +There are several options that can be configured in cluster configuration option. There are several [example yamls]({{<baseurl>}}/rke/latest/en/example-yamls/) that contain all the options. 
### Configuring Nodes -* [Nodes]({{< baseurl >}}/rke/latest/en/config-options/nodes/) +* [Nodes]({{}}/rke/latest/en/config-options/nodes/) * [Ignoring unsupported Docker versions](#supported-docker-versions) -* [Private Registries]({{< baseurl >}}/rke/latest/en/config-options/private-registries/) +* [Private Registries]({{}}/rke/latest/en/config-options/private-registries/) * [Cluster Level SSH Key Path](#cluster-level-ssh-key-path) * [SSH Agent](#ssh-agent) -* [Bastion Host]({{< baseurl >}}/rke/latest/en/config-options/bastion-host/) +* [Bastion Host]({{}}/rke/latest/en/config-options/bastion-host/) ### Configuring Kubernetes Cluster * [Cluster Name](#cluster-name) * [Kubernetes Version](#kubernetes-version) * [Prefix Path](#prefix-path) -* [System Images]({{< baseurl >}}/rke/latest/en/config-options/system-images/) -* [Services]({{< baseurl >}}/rke/latest/en/config-options/services/) -* [Extra Args and Binds and Environment Variables]({{< baseurl >}}/rke/latest/en/config-options/services/services-extras/) -* [External Etcd]({{< baseurl >}}/rke/latest/en/config-options/services/external-etcd/) -* [Authentication]({{< baseurl >}}/rke/latest/en/config-options/authentication/) -* [Authorization]({{< baseurl >}}/rke/latest/en/config-options/authorization/) +* [System Images]({{}}/rke/latest/en/config-options/system-images/) +* [Services]({{}}/rke/latest/en/config-options/services/) +* [Extra Args and Binds and Environment Variables]({{}}/rke/latest/en/config-options/services/services-extras/) +* [External Etcd]({{}}/rke/latest/en/config-options/services/external-etcd/) +* [Authentication]({{}}/rke/latest/en/config-options/authentication/) +* [Authorization]({{}}/rke/latest/en/config-options/authorization/) * [Rate Limiting]({{}}/rke/latest/en/config-options/rate-limiting/) -* [Cloud Providers]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/) +* [Cloud Providers]({{}}/rke/latest/en/config-options/cloud-providers/) * [Audit 
Log]({{}}/rke/latest/en/config-options/audit-log) -* [Add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/) - * [Network Plug-ins]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/) - * [DNS providers]({{< baseurl >}}/rke/latest/en/config-options/add-ons/dns/) - * [Ingress Controllers]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/) - * [Metrics Server]({{< baseurl >}}/rke/latest/en/config-options/add-ons/metrics-server/) - * [User-Defined Add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) +* [Add-ons]({{}}/rke/latest/en/config-options/add-ons/) + * [Network Plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/) + * [DNS providers]({{}}/rke/latest/en/config-options/add-ons/dns/) + * [Ingress Controllers]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/) + * [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/) + * [User-Defined Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) * [Add-ons Job Timeout](#add-ons-job-timeout) @@ -79,7 +79,7 @@ prefix_path: /opt/custom_path ### Cluster Level SSH Key Path -RKE connects to host(s) using `ssh`. Typically, each node will have an independent path for each ssh key, i.e. `ssh_key_path`, in the `nodes` section, but if you have a SSH key that is able to access **all** hosts in your cluster configuration file, you can set the path to that ssh key at the top level. Otherwise, you would set the ssh key path in the [nodes]({{< baseurl >}}/rke/latest/en/config-options/nodes/). +RKE connects to host(s) using `ssh`. Typically, each node will have an independent path for each ssh key, i.e. `ssh_key_path`, in the `nodes` section, but if you have a SSH key that is able to access **all** hosts in your cluster configuration file, you can set the path to that ssh key at the top level. 
Otherwise, you would set the ssh key path in the [nodes]({{<baseurl>}}/rke/latest/en/config-options/nodes/). If ssh key paths are defined at the cluster level and at the node level, the node-level key will take precedence. @@ -109,4 +109,4 @@ $ echo $SSH_AUTH_SOCK ### Add-ons Job Timeout -You can define [add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/) to be deployed after the Kubernetes cluster comes up, which uses Kubernetes [jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/). RKE will stop attempting to retrieve the job status after the timeout, which is in seconds. The default timeout value is `30` seconds. +You can define [add-ons]({{<baseurl>}}/rke/latest/en/config-options/add-ons/) to be deployed after the Kubernetes cluster comes up, which uses Kubernetes [jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/). RKE will stop attempting to retrieve the job status after the timeout, which is in seconds. The default timeout value is `30` seconds. diff --git a/content/rke/latest/en/config-options/add-ons/_index.md b/content/rke/latest/en/config-options/add-ons/_index.md index a665230b268..f2cb7765e3b 100644 --- a/content/rke/latest/en/config-options/add-ons/_index.md +++ b/content/rke/latest/en/config-options/add-ons/_index.md @@ -5,12 +5,12 @@ weight: 260 RKE supports configuring pluggable add-ons in the cluster YML. 
Add-ons are used to deploy several cluster components including: -* [Network plug-ins]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/) -* [Ingress controller]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/) -* [DNS provider]({{< baseurl >}}/rke/latest/en/config-options/add-ons/dns/) -* [Metrics Server]({{< baseurl >}}/rke/latest/en/config-options/add-ons/metrics-server/) +* [Network plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/) +* [Ingress controller]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/) +* [DNS provider]({{}}/rke/latest/en/config-options/add-ons/dns/) +* [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/) -These add-ons require images that can be found under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each add-on, but these can be overridden by changing the image tag in `system_images`. +These add-ons require images that can be found under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each add-on, but these can be overridden by changing the image tag in `system_images`. There are a few things worth noting: @@ -25,7 +25,7 @@ As of version v0.1.7, add-ons are split into two categories: - **Critical add-ons:** If these add-ons fail to deploy for any reason, RKE will error out. - **Non-critical add-ons:** If these add-ons fail to deploy, RKE will only log a warning and continue deploying any other add-ons. -Currently, only the [network plug-in]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/) is considered critical. 
KubeDNS, [ingress controllers]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/) and [user-defined add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) are considered non-critical. +Currently, only the [network plug-in]({{}}/rke/latest/en/config-options/add-ons/network-plugins/) is considered critical. KubeDNS, [ingress controllers]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/) and [user-defined add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) are considered non-critical. ## Add-on deployment jobs diff --git a/content/rke/latest/en/config-options/add-ons/dns/_index.md b/content/rke/latest/en/config-options/add-ons/dns/_index.md index a00aa2e5a12..6168f1fc4e7 100644 --- a/content/rke/latest/en/config-options/add-ons/dns/_index.md +++ b/content/rke/latest/en/config-options/add-ons/dns/_index.md @@ -26,7 +26,7 @@ CoreDNS can only be used on Kubernetes v1.12.0 and higher. RKE will deploy CoreDNS as a Deployment with the default replica count of 1. The pod consists of 1 container: `coredns`. RKE will also deploy coredns-autoscaler as a Deployment, which will scale the coredns Deployment by using the number of cores and nodes. Please see [Linear Mode](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler#linear-mode) for more information about this logic. -The images used for CoreDNS are under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with CoreDNS, but these can be overridden by changing the image tag in `system_images`. +The images used for CoreDNS are under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with CoreDNS, but these can be overridden by changing the image tag in `system_images`. 
## Scheduling CoreDNS @@ -66,7 +66,7 @@ dns: RKE will deploy kube-dns as a Deployment with the default replica count of 1. The pod consists of 3 containers: `kubedns`, `dnsmasq` and `sidecar`. RKE will also deploy kube-dns-autoscaler as a Deployment, which will scale the kube-dns Deployment by using the number of cores and nodes. Please see [Linear Mode](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler#linear-mode) for more information about this logic. -The images used for kube-dns are under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with kube-dns, but these can be overridden by changing the image tag in `system_images`. +The images used for kube-dns are under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with kube-dns, but these can be overridden by changing the image tag in `system_images`. ## Scheduling kube-dns diff --git a/content/rke/latest/en/config-options/add-ons/ingress-controllers/_index.md b/content/rke/latest/en/config-options/add-ons/ingress-controllers/_index.md index a7da4af0cd6..4e32fb33858 100644 --- a/content/rke/latest/en/config-options/add-ons/ingress-controllers/_index.md +++ b/content/rke/latest/en/config-options/add-ons/ingress-controllers/_index.md @@ -10,7 +10,7 @@ By default, RKE deploys the NGINX ingress controller on all schedulable nodes. RKE will deploy the ingress controller as a DaemonSet with `hostnetwork: true`, so ports `80`, and `443` will be opened on each node where the controller is deployed. -The images used for ingress controller is under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). 
For each Kubernetes version, there are default images associated with the ingress controller, but these can be overridden by changing the image tag in `system_images`. +The images used for ingress controller is under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with the ingress controller, but these can be overridden by changing the image tag in `system_images`. ## Scheduling Ingress Controllers diff --git a/content/rke/latest/en/config-options/add-ons/metrics-server/_index.md b/content/rke/latest/en/config-options/add-ons/metrics-server/_index.md index 88775ac5577..61f0d303601 100644 --- a/content/rke/latest/en/config-options/add-ons/metrics-server/_index.md +++ b/content/rke/latest/en/config-options/add-ons/metrics-server/_index.md @@ -7,7 +7,7 @@ By default, RKE deploys [Metrics Server](https://github.com/kubernetes-incubator RKE will deploy Metrics Server as a Deployment. -The image used for Metrics Server is under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there is a default image associated with the Metrics Server, but these can be overridden by changing the image tag in `system_images`. +The image used for Metrics Server is under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there is a default image associated with the Metrics Server, but these can be overridden by changing the image tag in `system_images`. 
## Disabling the Metrics Server diff --git a/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md b/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md index cb26c78fe57..7da2af08643 100644 --- a/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md +++ b/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md @@ -20,7 +20,7 @@ network: plugin: flannel ``` -The images used for network plug-ins are under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each network plug-in, but these can be overridden by changing the image tag in `system_images`. +The images used for network plug-ins are under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each network plug-in, but these can be overridden by changing the image tag in `system_images`. # Disabling Deployment of a Network Plug-in diff --git a/content/rke/latest/en/config-options/add-ons/user-defined-add-ons/_index.md b/content/rke/latest/en/config-options/add-ons/user-defined-add-ons/_index.md index 3f2dd072f91..72808d38936 100644 --- a/content/rke/latest/en/config-options/add-ons/user-defined-add-ons/_index.md +++ b/content/rke/latest/en/config-options/add-ons/user-defined-add-ons/_index.md @@ -3,7 +3,7 @@ title: User-Defined Add-Ons weight: 263 --- -Besides the [network plug-in]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins) and [ingress controllers]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/), you can define any add-on that you want deployed after the Kubernetes cluster is deployed. 
+Besides the [network plug-in]({{}}/rke/latest/en/config-options/add-ons/network-plugins) and [ingress controllers]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/), you can define any add-on that you want deployed after the Kubernetes cluster is deployed. There are two ways that you can specify an add-on. diff --git a/content/rke/latest/en/config-options/bastion-host/_index.md b/content/rke/latest/en/config-options/bastion-host/_index.md index 3b6848759c6..d2710e8c42d 100644 --- a/content/rke/latest/en/config-options/bastion-host/_index.md +++ b/content/rke/latest/en/config-options/bastion-host/_index.md @@ -3,7 +3,7 @@ title: Bastion/Jump Host Configuration weight: 220 --- -Since RKE uses `ssh` to connect to [nodes]({{< baseurl >}}/rke/latest/en/config-options/nodes/), you can configure the `cluster.yml` so RKE will use a bastion host. Keep in mind that the [port requirements]({{< baseurl >}}/rke/latest/en/os/#ports) for the RKE node move to the configured bastion host. Our private SSH key(s) only needs to reside on the host running RKE. You do not need to copy your private SSH key(s) to the bastion host. +Since RKE uses `ssh` to connect to [nodes]({{}}/rke/latest/en/config-options/nodes/), you can configure the `cluster.yml` so RKE will use a bastion host. Keep in mind that the [port requirements]({{}}/rke/latest/en/os/#ports) for the RKE node move to the configured bastion host. Our private SSH key(s) only needs to reside on the host running RKE. You do not need to copy your private SSH key(s) to the bastion host. 
```yaml bastion_host: diff --git a/content/rke/latest/en/config-options/cloud-providers/_index.md b/content/rke/latest/en/config-options/cloud-providers/_index.md index 27881c437e2..45501bcf784 100644 --- a/content/rke/latest/en/config-options/cloud-providers/_index.md +++ b/content/rke/latest/en/config-options/cloud-providers/_index.md @@ -6,9 +6,9 @@ weight: 250 RKE supports the ability to set your specific [cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) for your Kubernetes cluster. There are specific cloud configurations for these cloud providers. To enable a cloud provider its name as well as any required configuration options must be provided under the `cloud_provider` directive in the cluster YML. -* [AWS]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/aws) -* [Azure]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/azure) -* [OpenStack]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/openstack) -* [vSphere]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/vsphere) +* [AWS]({{}}/rke/latest/en/config-options/cloud-providers/aws) +* [Azure]({{}}/rke/latest/en/config-options/cloud-providers/azure) +* [OpenStack]({{}}/rke/latest/en/config-options/cloud-providers/openstack) +* [vSphere]({{}}/rke/latest/en/config-options/cloud-providers/vsphere) -Outside of this list, RKE also supports the ability to handle any [custom cloud provider]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/custom). +Outside of this list, RKE also supports the ability to handle any [custom cloud provider]({{}}/rke/latest/en/config-options/cloud-providers/custom). 
diff --git a/content/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting/_index.md b/content/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting/_index.md index 6d2cffca67f..74801997ee5 100644 --- a/content/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting/_index.md +++ b/content/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting/_index.md @@ -8,11 +8,11 @@ If you are experiencing issues while provisioning a cluster with enabled vSphere - controller-manager (Manages volumes in vCenter) - kubelet: (Mounts vSphere volumes to pods) -If your cluster is not configured with external [Cluster Logging]({{< baseurl >}}/rancher/v2.x/en/tools/logging/), you will need to SSH into nodes to get the logs of the `kube-controller-manager` (running on one of the control plane nodes) and the `kubelet` (pertaining to the node where the stateful pod has been scheduled). +If your cluster is not configured with external [Cluster Logging]({{}}/rancher/v2.x/en/tools/logging/), you will need to SSH into nodes to get the logs of the `kube-controller-manager` (running on one of the control plane nodes) and the `kubelet` (pertaining to the node where the stateful pod has been scheduled). The easiest way to create a SSH session with a node is the Rancher CLI tool. -1. [Configure the Rancher CLI]({{< baseurl >}}/rancher/v2.x/en/cli/) for your cluster. +1. [Configure the Rancher CLI]({{}}/rancher/v2.x/en/cli/) for your cluster. 2. 
Run the following command to get a shell to the corresponding nodes: ```sh diff --git a/content/rke/latest/en/config-options/nodes/_index.md b/content/rke/latest/en/config-options/nodes/_index.md index 75321c4c6b9..e15b7e98f21 100644 --- a/content/rke/latest/en/config-options/nodes/_index.md +++ b/content/rke/latest/en/config-options/nodes/_index.md @@ -116,7 +116,7 @@ The `internal_address` provides the ability to have nodes with multiple addresse The `hostname_override` is used to be able to provide a friendly name for RKE to use when registering the node in Kubernetes. This hostname doesn't need to be a routable address, but it must be a valid [Kubernetes resource name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). If the `hostname_override` isn't set, then the `address` directive is used when registering the node in Kubernetes. -> **Note:** When [cloud providers]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/) are configured, you may need to override the hostname in order to use the cloud provider correctly. There is an exception for the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws), where the `hostname_override` field will be explicitly ignored. +> **Note:** When [cloud providers]({{}}/rke/latest/en/config-options/cloud-providers/) are configured, you may need to override the hostname in order to use the cloud provider correctly. There is an exception for the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws), where the `hostname_override` field will be explicitly ignored. ### SSH Port @@ -130,7 +130,7 @@ For each node, you specify the `user` to be used when connecting to this node. T For each node, you specify the path, i.e. `ssh_key_path`, for the SSH private key to be used when connecting to this node. The default key path for each node is `~/.ssh/id_rsa`. 
-> **Note:** If you have a private key that can be used across all nodes, you can set the [SSH key path at the cluster level]({{< baseurl >}}/rke/latest/en/config-options/#cluster-level-ssh-key-path). The SSH key path set in each node will always take precedence. +> **Note:** If you have a private key that can be used across all nodes, you can set the [SSH key path at the cluster level]({{}}/rke/latest/en/config-options/#cluster-level-ssh-key-path). The SSH key path set in each node will always take precedence. ### SSH Key @@ -150,7 +150,7 @@ If the Docker socket is different than the default, you can set the `docker_sock ### Labels -You have the ability to add an arbitrary map of labels for each node. It can be used when using the [ingress controller's]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/) `node_selector` option. +You have the ability to add an arbitrary map of labels for each node. It can be used when using the [ingress controller's]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/) `node_selector` option. ### Taints diff --git a/content/rke/latest/en/config-options/private-registries/_index.md b/content/rke/latest/en/config-options/private-registries/_index.md index 5a5c1a4d18e..2f448920312 100644 --- a/content/rke/latest/en/config-options/private-registries/_index.md +++ b/content/rke/latest/en/config-options/private-registries/_index.md @@ -19,7 +19,7 @@ private_registries: ### Default Registry -As of v0.1.10, RKE supports specifying a default registry from the list of private registries to be used with all [system images]({{< baseurl >}}/rke/latest/en/config-options/system-images/) . In this example .RKE will use `registry.com` as the default registry for all system images, e.g. `rancher/rke-tools:v0.1.14` will become `registry.com/rancher/rke-tools:v0.1.14`. 
+As of v0.1.10, RKE supports specifying a default registry from the list of private registries to be used with all [system images]({{<baseurl>}}/rke/latest/en/config-options/system-images/). In this example, RKE will use `registry.com` as the default registry for all system images, e.g. `rancher/rke-tools:v0.1.14` will become `registry.com/rancher/rke-tools:v0.1.14`. ```yaml private_registries: @@ -31,9 +31,9 @@ private_registries: ### Air-gapped Setups -By default, all system images are being pulled from DockerHub. If you are on a system that does not have access to DockerHub, you will need to create a private registry that is populated with all the required [system images]({{< baseurl >}}/rke/latest/en/config-options/system-images/). +By default, all system images are being pulled from DockerHub. If you are on a system that does not have access to DockerHub, you will need to create a private registry that is populated with all the required [system images]({{<baseurl>}}/rke/latest/en/config-options/system-images/). -As of v0.1.10, you have to configure your private registry credentials, but you can specify this registry as a default registry so that all [system images]({{< baseurl >}}/rke/latest/en/config-options/system-images/) are pulled from the designated private registry. You can use the command `rke config --system-images` to get the list of default system images to populate your private registry. +As of v0.1.10, you have to configure your private registry credentials, but you can specify this registry as a default registry so that all [system images]({{<baseurl>}}/rke/latest/en/config-options/system-images/) are pulled from the designated private registry. You can use the command `rke config --system-images` to get the list of default system images to populate your private registry. 
-Prior to v0.1.10, you had to configure your private registry credentials **and** update the names of all the [system images]({{< baseurl >}}/rke/latest/en/config-options/system-images/) in the `cluster.yml` so that the image names would have the private registry URL appended before each image name. +Prior to v0.1.10, you had to configure your private registry credentials **and** update the names of all the [system images]({{}}/rke/latest/en/config-options/system-images/) in the `cluster.yml` so that the image names would have the private registry URL appended before each image name. diff --git a/content/rke/latest/en/config-options/services/_index.md b/content/rke/latest/en/config-options/services/_index.md index cfd88cc39a9..b1c7a4d4c1e 100644 --- a/content/rke/latest/en/config-options/services/_index.md +++ b/content/rke/latest/en/config-options/services/_index.md @@ -6,7 +6,7 @@ weight: 230 To deploy Kubernetes, RKE deploys several core components or services in Docker containers on the nodes. Based on the roles of the node, the containers deployed may be different. -**All services support additional [custom arguments, Docker mount binds and extra environment variables]({{< baseurl >}}/rke/latest/en/config-options/services/services-extras/).** +**All services support additional [custom arguments, Docker mount binds and extra environment variables]({{}}/rke/latest/en/config-options/services/services-extras/).** | Component | Services key name in cluster.yml | |-------------------------|----------------------------------| @@ -23,13 +23,13 @@ Kubernetes uses [etcd](https://etcd.io/) as a store for cluster state and data. RKE supports running etcd in a single node mode or in HA cluster mode. It also supports adding and removing etcd nodes to the cluster. -You can enable etcd to [take recurring snapshots]({{< baseurl >}}/rke/latest/en/etcd-snapshots/#recurring-snapshots). 
These snapshots can be used to [restore etcd]({{< baseurl >}}/rke/latest/en/etcd-snapshots/#etcd-disaster-recovery). +You can enable etcd to [take recurring snapshots]({{}}/rke/latest/en/etcd-snapshots/#recurring-snapshots). These snapshots can be used to [restore etcd]({{}}/rke/latest/en/etcd-snapshots/#etcd-disaster-recovery). -By default, RKE will deploy a new etcd service, but you can also run Kubernetes with an [external etcd service]({{< baseurl >}}/rke/latest/en/config-options/services/external-etcd/). +By default, RKE will deploy a new etcd service, but you can also run Kubernetes with an [external etcd service]({{}}/rke/latest/en/config-options/services/external-etcd/). ## Kubernetes API Server -> **Note for Rancher 2 users** If you are configuring Cluster Options using a [Config File]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) when creating [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the names of services should contain underscores only: `kube_api`. This only applies to Rancher v2.0.5 and v2.0.6. +> **Note for Rancher 2 users** If you are configuring Cluster Options using a [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) when creating [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the names of services should contain underscores only: `kube_api`. This only applies to Rancher v2.0.5 and v2.0.6. The [Kubernetes API](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) REST service, which handles requests and data for all Kubernetes objects and provide shared state for all the other Kubernetes components. 
@@ -58,10 +58,10 @@ RKE supports the following options for the `kube-api` service : - **Pod Security Policy** (`pod_security_policy`) - An option to enable the [Kubernetes Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). By default, we do not enable pod security policies as it is set to `false`. > **Note:** If you set `pod_security_policy` value to `true`, RKE will configure an open policy to allow any pods to work on the cluster. You will need to configure your own policies to fully utilize PSP. - **Always Pull Images** (`always_pull_images`) - Enable `AlwaysPullImages` Admission controller plugin. Enabling `AlwaysPullImages` is a security best practice. It forces Kubernetes to validate the image and pull credentials with the remote image registry. Local image layer cache will still be used, but it does add a small bit of overhead when launching containers to pull and compare image hashes. _Note: Available as of v0.2.0_ -- **Secrets Encryption Config** (`secrets_encryption_config`) - Manage Kubernetes at-rest data encryption. Documented [here]({{< baseurl >}}//rke/latest/en/config-options/secrets-encryption) +- **Secrets Encryption Config** (`secrets_encryption_config`) - Manage Kubernetes at-rest data encryption. Documented [here]({{}}/rke/latest/en/config-options/secrets-encryption) ## Kubernetes Controller Manager -> **Note for Rancher 2 users** If you are configuring Cluster Options using a [Config File]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) when creating [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the names of services should contain underscores only: `kube_controller`. This only applies to Rancher v2.0.5 and v2.0.6.
+> **Note for Rancher 2 users** If you are configuring Cluster Options using a [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) when creating [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the names of services should contain underscores only: `kube_controller`. This only applies to Rancher v2.0.5 and v2.0.6. The [Kubernetes Controller Manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) service is the component responsible for running Kubernetes main control loops. The controller manager monitors the cluster desired state through the Kubernetes API server and makes the necessary changes to the current state to reach the desired state. diff --git a/content/rke/latest/en/config-options/services/external-etcd/_index.md b/content/rke/latest/en/config-options/services/external-etcd/_index.md index 173fa826972..8ee04bb7797 100644 --- a/content/rke/latest/en/config-options/services/external-etcd/_index.md +++ b/content/rke/latest/en/config-options/services/external-etcd/_index.md @@ -5,7 +5,7 @@ weight: 232 By default, RKE will launch etcd servers, but RKE also supports being able to use an external etcd. RKE only supports connecting to a TLS enabled etcd setup. -> **Note:** RKE will not accept having external etcd servers in conjunction with [nodes]({{< baseurl >}}/rke/latest/en/config-options/nodes/) with the `etcd` role. +> **Note:** RKE will not accept having external etcd servers in conjunction with [nodes]({{}}/rke/latest/en/config-options/nodes/) with the `etcd` role. 
```yaml services: diff --git a/content/rke/latest/en/config-options/system-images/_index.md b/content/rke/latest/en/config-options/system-images/_index.md index ae16387c7cc..041a99a186e 100644 --- a/content/rke/latest/en/config-options/system-images/_index.md +++ b/content/rke/latest/en/config-options/system-images/_index.md @@ -75,4 +75,4 @@ system_images: ### Air-gapped Setups -If you have an air-gapped setup and cannot access `docker.io`, you will need to set up your [private registry]({{< baseurl >}}/rke/latest/en/config-options/private-registries/) in your cluster configuration file. After you set up private registry, you will need to update these images to pull from your private registry. +If you have an air-gapped setup and cannot access `docker.io`, you will need to set up your [private registry]({{}}/rke/latest/en/config-options/private-registries/) in your cluster configuration file. After you set up private registry, you will need to update these images to pull from your private registry. diff --git a/content/rke/latest/en/etcd-snapshots/_index.md b/content/rke/latest/en/etcd-snapshots/_index.md index d973feb3d2f..735fb8bab96 100644 --- a/content/rke/latest/en/etcd-snapshots/_index.md +++ b/content/rke/latest/en/etcd-snapshots/_index.md @@ -13,7 +13,7 @@ _Available as of v0.2.0_ RKE can upload your snapshots to a S3 compatible backend. -**Note:** As of RKE v0.2.0, the `pki.bundle.tar.gz` file is no longer required because of a change in how the [Kubernetes cluster state is stored]({{< baseurl >}}/rke/latest/en/installation/#kubernetes-cluster-state). +**Note:** As of RKE v0.2.0, the `pki.bundle.tar.gz` file is no longer required because of a change in how the [Kubernetes cluster state is stored]({{}}/rke/latest/en/installation/#kubernetes-cluster-state). 
# Backing Up a Cluster diff --git a/content/rke/latest/en/etcd-snapshots/one-time-snapshots/_index.md b/content/rke/latest/en/etcd-snapshots/one-time-snapshots/_index.md index b98f7e4ed42..400aee3b3e3 100644 --- a/content/rke/latest/en/etcd-snapshots/one-time-snapshots/_index.md +++ b/content/rke/latest/en/etcd-snapshots/one-time-snapshots/_index.md @@ -54,8 +54,8 @@ $ rke etcd snapshot-save \ | `--bucket-name` value | Specify s3 bucket name | * | | `--folder` value | Specify folder inside bucket where backup will be stored. This is optional. _Available as of v0.3.0_ | * | | `--region` value | Specify the s3 bucket location (optional) | * | -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | | -| `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | +| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) | | +| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) | The `--access-key` and `--secret-key` options are not required if the `etcd` nodes are AWS EC2 instances that have been configured with a suitable IAM instance profile. 
@@ -116,8 +116,8 @@ $ rke etcd snapshot-save --config cluster.yml --name snapshot-name | --- | --- | | `--name` value | Specify snapshot name | | `--config` value | Specify an alternate cluster YAML file (default: `cluster.yml`) [$RKE_CONFIG] | -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | -| `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | +| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) | +| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) | {{% /tab %}} {{% /tabs %}} diff --git a/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md b/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md index a4e0ce38419..3f26ea9ee47 100644 --- a/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md +++ b/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md @@ -33,7 +33,7 @@ $ rke etcd snapshot-restore --config cluster.yml --name mysnapshot The snapshot is assumed to be located in `/opt/rke/etcd-snapshots`. -**Note:** The `pki.bundle.tar.gz` file is not needed because RKE v0.2.0 changed how the [Kubernetes cluster state is stored]({{< baseurl >}}/rke/latest/en/installation/#kubernetes-cluster-state). +**Note:** The `pki.bundle.tar.gz` file is not needed because RKE v0.2.0 changed how the [Kubernetes cluster state is stored]({{}}/rke/latest/en/installation/#kubernetes-cluster-state). ### Example of Restoring from a Snapshot in S3 @@ -67,8 +67,8 @@ $ rke etcd snapshot-restore \ | `--bucket-name` value | Specify s3 bucket name | *| | `--folder` value | Specify folder inside bucket where backup will be stored. This is optional. This is optional. 
_Available as of v0.3.0_ | *| | `--region` value | Specify the s3 bucket location (optional) | *| -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | | -| `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | +| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) | | +| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) | {{% /tab %}} {{% tab "RKE prior to v0.2.0"%}} @@ -109,8 +109,8 @@ The `pki.bundle.tar.gz` file is also expected to be in the same location. | --- | --- | | `--name` value | Specify snapshot name | | `--config` value | Specify an alternate cluster YAML file (default: `cluster.yml`) [$RKE_CONFIG] | -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | -| `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | +| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) | +| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) | {{% /tab %}} {{% /tabs %}} diff --git a/content/rke/latest/en/example-yamls/_index.md b/content/rke/latest/en/example-yamls/_index.md index 9b155eecca8..9fe11e634f8 100644 --- a/content/rke/latest/en/example-yamls/_index.md +++ b/content/rke/latest/en/example-yamls/_index.md @@ -5,9 +5,9 @@ aliases: - /rke/latest/en/config-options/example-yamls/ --- -There are lots of different [configuration options]({{< baseurl >}}/rke/latest/en/config-options/) that can be set in the cluster configuration file for RKE. 
Here are some examples of files: +There are lots of different [configuration options]({{}}/rke/latest/en/config-options/) that can be set in the cluster configuration file for RKE. Here are some examples of files: -> **Note for Rancher 2 users** If you are configuring Cluster Options using a [Config File]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) when creating [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the names of services should contain underscores only: `kube_api` and `kube_controller`. This only applies to Rancher v2.0.5 and v2.0.6. +> **Note for Rancher 2 users** If you are configuring Cluster Options using a [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) when creating [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the names of services should contain underscores only: `kube_api` and `kube_controller`. This only applies to Rancher v2.0.5 and v2.0.6. ## Minimal `cluster.yml` example diff --git a/content/rke/latest/en/installation/_index.md b/content/rke/latest/en/installation/_index.md index 2017df4c853..30d5dc73842 100644 --- a/content/rke/latest/en/installation/_index.md +++ b/content/rke/latest/en/installation/_index.md @@ -74,20 +74,20 @@ $ brew upgrade rke The Kubernetes cluster components are launched using Docker on a Linux distro. You can use any Linux you want, as long as you can install Docker on it. -Review the [OS requirements]({{< baseurl >}}/rke/latest/en/installation/os/) and configure each node appropriately. +Review the [OS requirements]({{}}/rke/latest/en/installation/os/) and configure each node appropriately. ## Creating the Cluster Configuration File -RKE uses a cluster configuration file, referred to as `cluster.yml` to determine what nodes will be in the cluster and how to deploy Kubernetes. 
There are [many configuration options]({{< baseurl >}}/rke/latest/en/config-options/) that can be set in the `cluster.yml`. In our example, we will be assuming the minimum of one [node]({{< baseurl >}}/rke/latest/en/config-options/nodes) for your Kubernetes cluster. +RKE uses a cluster configuration file, referred to as `cluster.yml` to determine what nodes will be in the cluster and how to deploy Kubernetes. There are [many configuration options]({{}}/rke/latest/en/config-options/) that can be set in the `cluster.yml`. In our example, we will be assuming the minimum of one [node]({{}}/rke/latest/en/config-options/nodes) for your Kubernetes cluster. There are two easy ways to create a `cluster.yml`: -- Using our [minimal `cluster.yml`]({{< baseurl >}}/rke/latest/en/example-yamls/#minimal-cluster-yml-example) and updating it based on the node that you will be using. +- Using our [minimal `cluster.yml`]({{}}/rke/latest/en/example-yamls/#minimal-cluster-yml-example) and updating it based on the node that you will be using. - Using `rke config` to query for all the information needed. ### Using `rke config` -Run `rke config` to create a new `cluster.yml` in the current directory. This command will prompt you for all the information needed to build a cluster. See [cluster configuration options]({{< baseurl >}}/rke/latest/en/config-options/) for details on the various options. +Run `rke config` to create a new `cluster.yml` in the current directory. This command will prompt you for all the information needed to build a cluster. See [cluster configuration options]({{}}/rke/latest/en/config-options/) for details on the various options. ``` rke config --name cluster.yml @@ -117,7 +117,7 @@ To create an HA cluster, specify more than one host with role `controlplane`. _Available as of v0.2.0_ -By default, Kubernetes clusters require certificates and RKE auto-generates the certificates for all cluster components. 
You can also use [custom certificates]({{< baseurl >}}/rke/latest/en/installation/certs/). After the Kubernetes cluster is deployed, you can [manage these auto-generated certificates]({{< baseurl >}}/rke/latest/en/cert-mgmt/#certificate-rotation). +By default, Kubernetes clusters require certificates and RKE auto-generates the certificates for all cluster components. You can also use [custom certificates]({{}}/rke/latest/en/installation/certs/). After the Kubernetes cluster is deployed, you can [manage these auto-generated certificates]({{}}/rke/latest/en/cert-mgmt/#certificate-rotation). ## Deploying Kubernetes with RKE @@ -146,7 +146,7 @@ The last line should read `Finished building Kubernetes cluster successfully` to Save a copy of the following files in a secure location: - `cluster.yml`: The RKE cluster configuration file. -- `kube_config_cluster.yml`: The [Kubeconfig file]({{< baseurl >}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `kube_config_cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. - `cluster.rkestate`: The [Kubernetes Cluster State file](#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ > **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. @@ -161,9 +161,9 @@ Prior to v0.2.0, RKE saved the Kubernetes cluster state as a secret. When updati ## Interacting with your Kubernetes cluster -After your cluster is up and running, you can start using the [generated kubeconfig file]({{< baseurl >}}/rke/latest/en/kubeconfig) to start interacting with your Kubernetes cluster using `kubectl`. +After your cluster is up and running, you can start using the [generated kubeconfig file]({{}}/rke/latest/en/kubeconfig) to start interacting with your Kubernetes cluster using `kubectl`. After installation, there are several maintenance items that might arise: -* [Certificate Management]({{< baseurl >}}/rke/latest/en/cert-mgmt/) -* [Adding and Removing Nodes in the cluster]({{< baseurl >}}/rke/latest/en/managing-clusters) +* [Certificate Management]({{}}/rke/latest/en/cert-mgmt/) +* [Adding and Removing Nodes in the cluster]({{}}/rke/latest/en/managing-clusters) diff --git a/content/rke/latest/en/installation/certs/_index.md b/content/rke/latest/en/installation/certs/_index.md index 19e5a04a0e9..1907a0a68eb 100644 --- a/content/rke/latest/en/installation/certs/_index.md +++ b/content/rke/latest/en/installation/certs/_index.md @@ -7,7 +7,7 @@ _Available as of v0.2.0_ By default, Kubernetes clusters require certificates and RKE auto-generates the certificates for all the Kubernetes services. RKE can also use custom certificates for these Kubernetes services. -When [deploying Kubernetes with RKE]({{< baseurl >}}/rke/latest/en/installation/#deploying-kubernetes-with-rke), there are two additional options that can be used with `rke up` so that RKE uses custom certificates. 
+When [deploying Kubernetes with RKE]({{}}/rke/latest/en/installation/#deploying-kubernetes-with-rke), there are two additional options that can be used with `rke up` so that RKE uses custom certificates. | Option | Description | | --- | --- | @@ -45,7 +45,7 @@ The following certificates must exist in the certificate directory. If you want to create and sign the certificates by a real Certificate Authority (CA), you can use RKE to generate a set of Certificate Signing Requests (CSRs) and keys. Using the `rke cert generate-csr` command, you can generate the CSRs and keys. -1. Set up your `cluster.yml` with the [node information]({{< baseurl >}}/rke/latest/en/config-options/nodes/). +1. Set up your `cluster.yml` with the [node information]({{}}/rke/latest/en/config-options/nodes/). 2. Run `rke cert generate-csr` to generate certificates for the node(s) in the `cluster.yml`. By default, the CSRs and keys will be saved in `./cluster_certs`. To have them saved in a different directory, use `--cert-dir` to define what directory to have them saved in. diff --git a/content/rke/latest/en/managing-clusters/_index.md b/content/rke/latest/en/managing-clusters/_index.md index 5f0b6422df9..5cb87f3a6d4 100644 --- a/content/rke/latest/en/managing-clusters/_index.md +++ b/content/rke/latest/en/managing-clusters/_index.md @@ -8,7 +8,7 @@ aliases: ### Adding/Removing Nodes -RKE supports adding/removing [nodes]({{< baseurl >}}/rke/latest/en/config-options/nodes/) for worker and controlplane hosts. +RKE supports adding/removing [nodes]({{}}/rke/latest/en/config-options/nodes/) for worker and controlplane hosts. In order to add additional nodes, you update the original `cluster.yml` file with any additional nodes and specify their role in the Kubernetes cluster. @@ -26,7 +26,7 @@ You can add/remove only worker nodes, by running `rke up --update-only`. This wi In order to remove the Kubernetes components from nodes, you use the `rke remove` command. 
-> **Warning:** This command is irreversible and will destroy the Kubernetes cluster, including etcd snapshots on S3. If there is a disaster and your cluster is inaccessible, refer to the process for [restoring your cluster from a snapshot]({{< baseurl >}}/rke/latest/en/etcd-snapshots/#etcd-disaster-recovery). +> **Warning:** This command is irreversible and will destroy the Kubernetes cluster, including etcd snapshots on S3. If there is a disaster and your cluster is inaccessible, refer to the process for [restoring your cluster from a snapshot]({{}}/rke/latest/en/etcd-snapshots/#etcd-disaster-recovery). The `rke remove` command does the following to each node in the `cluster.yml`: diff --git a/content/rke/latest/en/os/_index.md b/content/rke/latest/en/os/_index.md index d9da146c135..9c09b13e0b8 100644 --- a/content/rke/latest/en/os/_index.md +++ b/content/rke/latest/en/os/_index.md @@ -31,7 +31,7 @@ weight: 5 RKE runs on almost any Linux OS with Docker installed. Most of the development and testing of RKE occurred on Ubuntu 16.04. However, some OS's have restrictions and specific requirements. -- [SSH user]({{< baseurl >}}/rke/latest/en/config-options/nodes/#ssh-user) - The SSH user used for node access must be a member of the `docker` group on the node: +- [SSH user]({{}}/rke/latest/en/config-options/nodes/#ssh-user) - The SSH user used for node access must be a member of the `docker` group on the node: ``` usermod -aG docker @@ -100,7 +100,7 @@ net.bridge.bridge-nf-call-iptables=1 ### Red Hat Enterprise Linux (RHEL) / Oracle Enterprise Linux (OEL) / CentOS -If using Red Hat Enterprise Linux, Oracle Enterprise Linux or CentOS, you cannot use the `root` user as [SSH user]({{< baseurl >}}/rke/latest/en/config-options/nodes/#ssh-user) due to [Bugzilla 1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565). Please follow the instructions below how to setup Docker correctly, based on the way you installed Docker on the node.
+If using Red Hat Enterprise Linux, Oracle Enterprise Linux or CentOS, you cannot use the `root` user as [SSH user]({{}}/rke/latest/en/config-options/nodes/#ssh-user) due to [Bugzilla 1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565). Please follow the instructions below how to setup Docker correctly, based on the way you installed Docker on the node. #### Using upstream Docker If you are using upstream Docker, the package name is `docker-ce` or `docker-ee`. You can check the installed package by executing: diff --git a/content/rke/latest/en/troubleshooting/_index.md b/content/rke/latest/en/troubleshooting/_index.md index fa39cdc4053..c05e95884df 100644 --- a/content/rke/latest/en/troubleshooting/_index.md +++ b/content/rke/latest/en/troubleshooting/_index.md @@ -3,5 +3,5 @@ title: Troubleshooting weight: 400 --- -* [SSH Connectivity Errors]({{< baseurl >}}/rke/latest/en/troubleshooting/ssh-connectivity-errors/) -* [Provisioning Errors]({{< baseurl >}}/rke/latest/en/troubleshooting/provisioning-errors/) +* [SSH Connectivity Errors]({{}}/rke/latest/en/troubleshooting/ssh-connectivity-errors/) +* [Provisioning Errors]({{}}/rke/latest/en/troubleshooting/provisioning-errors/) diff --git a/content/rke/latest/en/troubleshooting/provisioning-errors/_index.md b/content/rke/latest/en/troubleshooting/provisioning-errors/_index.md index 71cabddb9cc..a9867b3271a 100644 --- a/content/rke/latest/en/troubleshooting/provisioning-errors/_index.md +++ b/content/rke/latest/en/troubleshooting/provisioning-errors/_index.md @@ -5,7 +5,7 @@ weight: 200 ### Failed to get job complete status -Most common reason for this error is that a node is having issues that block the deploy job from completing successfully. See [Get node conditions]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/#get-node-conditions) how to check node conditions. +Most common reason for this error is that a node is having issues that block the deploy job from completing successfully. 
See [Get node conditions]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/#get-node-conditions) how to check node conditions. You can also retrieve the log from the job to see if it has an indication of the error, make sure you replace `rke-network-plugin-deploy-job` with the job name from the error: diff --git a/content/rke/latest/en/upgrades/_index.md b/content/rke/latest/en/upgrades/_index.md index 5e47ee6ab09..c080194433c 100644 --- a/content/rke/latest/en/upgrades/_index.md +++ b/content/rke/latest/en/upgrades/_index.md @@ -3,7 +3,7 @@ title: Upgrades weight: 100 --- -After RKE has deployed Kubernetes, you can upgrade the versions of the components in your Kubernetes cluster, the [definition of the Kubernetes services]({{< baseurl >}}/rke/latest/en/config-options/services/) or the [add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/). +After RKE has deployed Kubernetes, you can upgrade the versions of the components in your Kubernetes cluster, the [definition of the Kubernetes services]({{}}/rke/latest/en/config-options/services/) or the [add-ons]({{}}/rke/latest/en/config-options/add-ons/). The default Kubernetes version for each RKE version can be found in [the RKE release notes](https://github.com/rancher/rke/releases/). @@ -27,7 +27,7 @@ This page covers the following topics: ### Prerequisites - Ensure that any `system_images` configuration is absent from the `cluster.yml`. The Kubernetes version should only be listed under the `system_images` directive if an [unsupported version](#using-an-unsupported-kubernetes-version) is being used. Refer to [Kubernetes version precedence](#kubernetes-version-precedence) for more information. -- Ensure that the correct files to manage [Kubernetes cluster state]({{< baseurl >}}/rke/latest/en/installation/#kubernetes-cluster-state) are present in the working directory. Refer to the tabs below for the required files, which differ based on the RKE version. 
+- Ensure that the correct files to manage [Kubernetes cluster state]({{}}/rke/latest/en/installation/#kubernetes-cluster-state) are present in the working directory. Refer to the tabs below for the required files, which differ based on the RKE version. {{% tabs %}} {{% tab "RKE v0.2.0+" %}} @@ -86,7 +86,7 @@ As of v0.2.0, if a version is defined in `kubernetes_version` and is not found i Prior to v0.2.0, if a version is defined in `kubernetes_version` and is not found in the specific list of supported Kubernetes versions, the default version from the supported list is used. -If you want to use a different version from the supported list, please use the [system images]({{< baseurl >}}/rke/latest/en/config-options/system-images/) option. +If you want to use a different version from the supported list, please use the [system images]({{}}/rke/latest/en/config-options/system-images/) option. ### Mapping the Kubernetes Version to Services @@ -98,7 +98,7 @@ For RKE prior to v0.3.0, the service defaults are located [here](https://github. ### Service Upgrades -[Services]({{< baseurl >}}/rke/latest/en/config-options/services/) can be upgraded by changing any of the services arguments or `extra_args` and running `rke up` again with the updated configuration file. +[Services]({{}}/rke/latest/en/config-options/services/) can be upgraded by changing any of the services arguments or `extra_args` and running `rke up` again with the updated configuration file. > **Note:** The following arguments, `service_cluster_ip_range` or `cluster_cidr`, cannot be changed as any changes to these arguments will result in a broken cluster. Currently, network pods are not automatically upgraded. @@ -106,4 +106,4 @@ For RKE prior to v0.3.0, the service defaults are located [here](https://github. As of v0.1.8, upgrades to add-ons are supported. 
-[Add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/) can also be upgraded by changing any of the add-ons and running `rke up` again with the updated configuration file. +[Add-ons]({{}}/rke/latest/en/config-options/add-ons/) can also be upgraded by changing any of the add-ons and running `rke up` again with the updated configuration file.