From 9cdf2e0032b394c9d1617c78c8efc45f4ec07818 Mon Sep 17 00:00:00 2001 From: Catherine Luse Date: Wed, 19 Aug 2020 14:10:00 -0700 Subject: [PATCH] Reorganize docs for Rancher v2.5 --- content/rancher/v2.5/en/_index.md | 12 - content/rancher/v2.5/en/api/_index.md | 52 -- .../rancher/v2.5/en/api/api-keys/_index.md | 50 -- .../rancher/v2.5/en/api/api-tokens/_index.md | 29 - content/rancher/v2.5/en/backups/_index.md | 14 - .../rancher/v2.5/en/backups/legacy/_index.md | 6 - .../v2.5/en/backups/legacy/docker/_index.md | 6 - .../legacy/docker/docker-backups/_index.md | 69 -- .../legacy/docker/docker-restore/_index.md | 68 -- .../legacy/special-scenarios/_index.md | 74 -- content/rancher/v2.5/en/cli/_index.md | 80 --- .../v2.5/en/cli/backups-etcd/_index.md | 8 - .../v2.5/en/cli/disaster-recovery/_index.md | 8 - .../rancher/v2.5/en/cli/rancher-k8s/_index.md | 16 - .../v2.5/en/cluster-explorer/_index.md | 113 --- .../certificate-rotation/_index.md | 37 - .../cluster-explorer/certificates/_index.md | 43 -- .../cluster-autoscaler/_index.md | 25 - .../cluster-autoscaler/amazon/_index.md | 580 ---------------- .../en/cluster-explorer/configmaps/_index.md | 42 -- .../load-balancers-and-ingress/_index.md | 61 -- .../ingress/_index.md | 80 --- .../load-balancers/_index.md | 72 -- .../en/cluster-explorer/pipelines/_index.md | 268 -------- .../pipelines/concepts/_index.md | 36 - .../pipelines/config/_index.md | 645 ------------------ .../pipelines/docs-for-v2.0.x/_index.md | 123 ---- .../pipelines/example-repos/_index.md | 74 -- .../pipelines/example/_index.md | 72 -- .../pipelines/storage/_index.md | 103 --- .../pod-security-policy/_index.md | 30 - .../cluster-explorer/project-admin/_index.md | 41 -- .../project-admin/namespaces/_index.md | 68 -- .../project-admin/project-features/_index.md | 41 -- .../project-features/alerts/_index.md | 184 ----- .../project-features/istio/_index.md | 19 - .../project-features/logging/_index.md | 108 --- .../project-features/monitoring/_index.md | 
81 --- .../project-admin/resource-quotas/_index.md | 42 -- .../override-container-default/_index.md | 39 -- .../override-namespace-default/_index.md | 34 - .../quota-type-reference/_index.md | 24 - .../quotas-for-projects/_index.md | 41 -- .../en/cluster-explorer/registries/_index.md | 116 ---- .../en/cluster-explorer/secrets/_index.md | 44 -- .../service-discovery/_index.md | 50 -- .../en/cluster-explorer/storage/_index.md | 58 -- .../attaching-existing-storage/_index.md | 102 --- .../storage/examples/_index.md | 11 - .../storage/examples/ebs/_index.md | 16 - .../storage/examples/nfs/_index.md | 66 -- .../storage/examples/vsphere/_index.md | 68 -- .../storage/glusterfs-volumes/_index.md | 32 - .../storage/how-storage-works/_index.md | 76 --- .../storage/iscsi-volumes/_index.md | 30 - .../provisioning-new-storage/_index.md | 109 --- .../en/cluster-explorer/workloads/_index.md | 79 --- .../workloads/add-a-sidecar/_index.md | 35 - .../workloads/deploy-workloads/_index.md | 57 -- .../horitzontal-pod-autoscaler/_index.md | 35 - .../hpa-background/_index.md | 40 -- .../manage-hpa-with-kubectl/_index.md | 200 ------ .../manage-hpa-with-rancher-ui/_index.md | 53 -- .../testing-hpa/_index.md | 491 ------------- .../workloads/rollback-workloads/_index.md | 14 - .../workloads/upgrade-workloads/_index.md | 21 - .../rancher/v2.5/en/contributing/_index.md | 120 ---- content/rancher/v2.5/en/ecm/_index.md | 50 -- .../v2.5/en/ecm/access-control/_index.md | 36 - .../access-control/authentication/_index.md | 98 --- .../authentication/ad/_index.md | 197 ------ .../authentication/azure-ad/_index.md | 204 ------ .../authentication/freeipa/_index.md | 52 -- .../authentication/github/_index.md | 51 -- .../authentication/google/_index.md | 106 --- .../authentication/keycloak/_index.md | 119 ---- .../authentication/local/_index.md | 14 - .../authentication/microsoft-adfs/_index.md | 35 - .../microsoft-adfs-setup/_index.md | 82 --- .../rancher-adfs-setup/_index.md | 44 -- 
.../authentication/okta/_index.md | 51 -- .../authentication/openldap/_index.md | 48 -- .../openldap/openldap-config/_index.md | 86 --- .../authentication/ping-federate/_index.md | 51 -- .../authentication/shibboleth/_index.md | 107 --- .../authentication/shibboleth/about/_index.md | 32 - .../authentication/user-groups/_index.md | 60 -- .../v2.5/en/ecm/access-control/rbac/_index.md | 28 - .../rbac/add-users-to-cluster/_index.md | 55 -- .../rbac/default-custom-roles/_index.md | 158 ----- .../rbac/locked-roles/_index.md | 37 - .../en/ecm/access-control/rbac/mcm/_index.md | 7 - .../ecm/access-control/rbac/mcm/ace/_index.md | 43 -- .../rbac/mcm/cluster-project-roles/_index.md | 183 ----- .../rbac/mcm/custom-global-roles/_index.md | 6 - .../rbac/mcm/global-permissions/_index.md | 172 ----- .../access-control/rbac/mcm/kubectl/_index.md | 103 --- .../rbac/mcm/project-members/_index.md | 50 -- .../en/ecm/backing-up-a-cluster/_index.md | 137 ---- .../v2.5/en/ecm/best-practices/_index.md | 23 - .../v2.5/en/ecm/best-practices/mcm/_index.md | 6 - .../architecture-recommendations/_index.md | 119 ---- .../best-practices/mcm/containers/_index.md | 49 -- .../mcm/deployment-strategies/_index.md | 45 -- .../mcm/deployment-types/_index.md | 38 -- .../best-practices/mcm/management/_index.md | 132 ---- .../best-practices/rancher-server/_index.md | 6 - .../v2.5/en/ecm/cloning-clusters/_index.md | 99 --- .../en/ecm/cluster-configuration/_index.md | 68 -- .../en/ecm/config-private-registry/_index.md | 43 -- .../v2.5/en/ecm/disconnecting-nodes/_index.md | 279 -------- content/rancher/v2.5/en/ecm/drivers/_index.md | 44 -- .../en/ecm/drivers/cluster-drivers/_index.md | 42 -- .../en/ecm/drivers/node-drivers/_index.md | 37 - .../v2.5/en/ecm/feature-flags/_index.md | 154 ----- .../_index.md | 40 -- .../istio-virtual-service-ui/_index.md | 31 - .../rancher/v2.5/en/ecm/globaldns/_index.md | 118 ---- .../v2.5/en/ecm/infrastructure/_index.md | 4 - .../cloud-credentials/_index.md | 49 -- 
.../infrastructure/cloud-providers/_index.md | 39 -- .../cloud-providers/amazon/_index.md | 150 ---- .../cloud-providers/azure/_index.md | 70 -- .../cloud-providers/gce/_index.md | 54 -- .../infrastructure/node-templates/_index.md | 47 -- .../en/ecm/infrastructure/nodes/_index.md | 224 ------ .../v2.5/en/ecm/k8s-metadata/_index.md | 89 --- .../_index.md | 31 - .../en/ecm/pod-security-policies/_index.md | 82 --- .../en/ecm/projects-and-namespaces/_index.md | 194 ------ .../v2.5/en/ecm/requirements/_index.md | 314 --------- .../v2.5/en/ecm/requirements/ports/_index.md | 186 ----- .../en/ecm/restoring-from-backup/_index.md | 111 --- .../v2.5/en/ecm/rke-templates/_index.md | 125 ---- .../applying-templates/_index.md | 61 -- .../creating-and-revising/_index.md | 162 ----- .../creator-permissions/_index.md | 50 -- .../ecm/rke-templates/enforcement/_index.md | 38 -- .../rke-templates/example-scenarios/_index.md | 71 -- .../ecm/rke-templates/example-yaml/_index.md | 112 --- .../en/ecm/rke-templates/overrides/_index.md | 15 - .../rke-templates-and-hardware/_index.md | 70 -- .../template-access-and-sharing/_index.md | 61 -- .../v2.5/en/ecm/setting-up-k8s/_index.md | 90 --- .../cluster-capabilities-table/index.md | 22 - .../hosted-kubernetes-clusters/_index.md | 32 - .../hosted-kubernetes-clusters/ack/_index.md | 46 -- .../hosted-kubernetes-clusters/aks/_index.md | 140 ---- .../hosted-kubernetes-clusters/cce/_index.md | 80 --- .../hosted-kubernetes-clusters/eks/_index.md | 289 -------- .../hosted-kubernetes-clusters/gke/_index.md | 41 -- .../hosted-kubernetes-clusters/tke/_index.md | 75 -- .../imported-clusters/_index.md | 177 ----- .../imported-clusters/eks/_index.md | 4 - .../imported-clusters/k3s/_index.md | 4 - .../imported-clusters/rancher-k8s/_index.md | 4 - .../production-checklist/_index.md | 50 -- .../nodes-and-roles/_index.md | 43 -- .../recommended-architecture/_index.md | 74 -- .../ecm/setting-up-k8s/rke-clusters/_index.md | 34 - 
.../cluster-config-reference/_index.md | 386 ----------- .../pod-security-policies/_index.md | 19 - .../rke-clusters/custom-nodes/_index.md | 113 --- .../custom-nodes/agent-options/_index.md | 54 -- .../infrastructure-provider/_index.md | 123 ---- .../infrastructure-provider/azure/_index.md | 48 -- .../digital-ocean/_index.md | 43 -- .../infrastructure-provider/ec2/_index.md | 266 -------- .../infrastructure-provider/vsphere/_index.md | 41 -- .../provisioning-vsphere-clusters/_index.md | 317 --------- .../creating-credentials/_index.md | 41 -- .../enabling-uuids/_index.md | 24 - .../node-template-reference/_index.md | 93 --- .../rke-clusters/node-requirements/_index.md | 184 ----- .../rke-clusters/rancher-agents/_index.md | 39 -- .../rke-clusters/windows-clusters/_index.md | 282 -------- .../docs-for-2.1-and-2.2/_index.md | 176 ----- .../host-gateway-requirements/_index.md | 37 - .../en/ecm/upgrading-kubernetes/_index.md | 159 ----- .../v2.5/en/ecm/upgrading-mcm/_index.md | 6 - .../en/ecm/upgrading-mcm/rollbacks/_index.md | 6 - content/rancher/v2.5/en/faq/_index.md | 70 -- content/rancher/v2.5/en/faq/kubectl/_index.md | 30 - .../v2.5/en/faq/mcm/networking/_index.md | 9 - .../mcm/networking/cni-providers/_index.md | 152 ----- .../en/faq/mcm/removing-rancher/_index.md | 52 -- .../v2.5/en/faq/mcm/technical/_index.md | 189 ----- .../rancher/v2.5/en/faq/security/_index.md | 15 - .../rancher/v2.5/en/faq/telemetry/_index.md | 32 - .../v2.5/en/install-rancher-on-k8s/_index.md | 18 - .../install-rancher-on-k8s/install/_index.md | 274 -------- .../install/chart-options/_index.md | 255 ------- .../install/choosing-version/_index.md | 91 --- .../install/helm-version/_index.md | 12 - .../install/resources/_index.md | 30 - .../install/resources/advanced/_index.md | 6 - .../advanced/api-audit-log/_index.md | 577 ---------------- .../advanced/arm64-platform/_index.md | 25 - .../install/resources/advanced/etcd/_index.md | 40 -- .../resources/advanced/firewall/_index.md | 106 --- 
.../install/resources/air-gap/_index.md | 24 - .../air-gap/install-rancher/_index.md | 343 ---------- .../air-gap/local-system-charts/_index.md | 65 -- .../populate-private-registry/_index.md | 275 -------- .../air-gap/prepare-registry/_index.md | 25 - .../install/resources/encryption/_index.md | 6 - .../custom-ca-root-certificate/_index.md | 25 - .../encryption/tls-secrets/_index.md | 36 - .../encryption/tls-settings/_index.md | 32 - .../upgrading-cert-manager/_index.md | 234 ------- .../install/resources/k8s-tutorials/_index.md | 6 - .../resources/k8s-tutorials/ha-RKE/_index.md | 170 ----- .../resources/k8s-tutorials/ha-RKE2/_index.md | 7 - .../k8s-tutorials/how-ha-works/_index.md | 25 - .../infrastructure-tutorials/_index.md | 15 - .../ec2-node/_index.md | 64 -- .../infrastructure-tutorials/nginx/_index.md | 83 --- .../infrastructure-tutorials/nlb/_index.md | 179 ----- .../infrastructure-tutorials/rds/_index.md | 34 - .../resources/single-node-docker/_index.md | 150 ---- .../single-node-docker/advanced/_index.md | 91 --- .../installing-docker/_index.md | 18 - .../single-node-docker/proxy/_index.md | 36 - .../single-node-install-external-lb/_index.md | 240 ------- .../install-rancher-on-k8s/upgrade/_index.md | 222 ------ .../upgrade/rollbacks/_index.md | 12 - .../en/install-rancher-on-linux/_index.md | 15 - .../install/_index.md | 10 - .../upgrade/_index.md | 28 - .../upgrade/cluster-upgrade/_index.md | 8 - .../woker-node-upgrade/_index.md | 8 - .../upgrade/image-upgrade/_index.md | 6 - content/rancher/v2.5/en/istio/_index.md | 84 --- .../rancher/v2.5/en/istio/legacy/_index.md | 6 - .../en/istio/legacy/disabling-istio/_index.md | 27 - .../v2.5/en/istio/legacy/rbac/_index.md | 58 -- .../en/istio/legacy/release-notes/_index.md | 19 - .../v2.5/en/istio/legacy/resources/_index.md | 148 ---- .../v2.5/en/istio/legacy/setup/_index.md | 28 - .../legacy/setup/deploy-workloads/_index.md | 322 --------- .../setup/enable-istio-in-cluster/_index.md | 24 - 
.../enable-istio-with-psp/_index.md | 48 -- .../setup/enable-istio-in-namespace/_index.md | 48 -- .../en/istio/legacy/setup/gateway/_index.md | 130 ---- .../legacy/setup/node-selectors/_index.md | 38 -- .../setup/set-up-traffic-management/_index.md | 61 -- .../istio/legacy/setup/view-traffic/_index.md | 26 - .../rancher/v2.5/en/logging/legacy/_index.md | 118 ---- .../en/logging/legacy/elasticsearch/_index.md | 41 -- .../v2.5/en/logging/legacy/fluentd/_index.md | 34 - .../v2.5/en/logging/legacy/kafka/_index.md | 41 -- .../v2.5/en/logging/legacy/splunk/_index.md | 73 -- .../v2.5/en/logging/legacy/syslog/_index.md | 41 -- .../v2.5/en/longhorn-storage/_index.md | 6 - .../v2.5/en/longhorn-storage/legacy/_index.md | 4 - .../v2.5/en/managing-applications/_index.md | 101 --- .../creating-apps/_index.md | 123 ---- .../creating-custom-libraries/_index.md | 100 --- .../enabling-builtin-libraries/_index.md | 25 - .../launching-apps/_index.md | 100 --- .../library-config/_index.md | 71 -- .../managing-apps/_index.md | 82 --- .../multi-cluster-apps/_index.md | 162 ----- .../managing-applications/tutorial/_index.md | 72 -- .../v2.5/en/monitoring/legacy/_index.md | 6 - .../en/monitoring/legacy/alerts/_index.md | 243 ------- .../legacy/alerts/default-alerts/_index.md | 57 -- .../legacy/alerts/expression/_index.md | 430 ------------ .../en/monitoring/legacy/monitoring/_index.md | 4 - .../monitoring/cluster-metrics/_index.md | 110 --- .../monitoring/custom-metrics/_index.md | 489 ------------- .../legacy/monitoring/prometheus/_index.md | 60 -- .../monitoring/viewing-metrics/_index.md | 58 -- .../en/monitoring/legacy/notifiers/_index.md | 130 ---- .../rancher/v2.5/en/opa-gatekeper/_index.md | 6 - .../v2.5/en/opa-gatekeper/legacy/_index.md | 94 --- content/rancher/v2.5/en/overview/_index.md | 66 -- .../v2.5/en/overview/architecture/_index.md | 203 ------ .../v2.5/en/overview/concepts/_index.md | 72 -- content/rancher/v2.5/en/security/_index.md | 101 --- 
.../rancher/v2.5/en/security/cve/_index.md | 17 - .../v2.5/en/security/security-scan/_index.md | 254 ------- .../rancher/v2.5/en/system-tools/_index.md | 116 ---- .../rancher/v2.5/en/troubleshooting/_index.md | 42 -- .../v2.5/en/troubleshooting/docker/_index.md | 6 - .../v2.5/en/troubleshooting/mcm/_index.md | 187 ----- .../v2.5/en/troubleshooting/mcm/dns/_index.md | 217 ------ .../mcm/imported-clusters/_index.md | 62 -- .../en/troubleshooting/mcm/logging/_index.md | 48 -- .../troubleshooting/mcm/networking/_index.md | 118 ---- .../v2.5/en/troubleshooting/rke/_index.md | 18 - .../rke/controlplane/_index.md | 40 -- .../en/troubleshooting/rke/etcd/_index.md | 365 ---------- .../rke/kubernetes-resources/_index.md | 271 -------- .../troubleshooting/rke/nginx-proxy/_index.md | 69 -- .../troubleshooting/rke/rancher_rke/_index.md | 80 --- .../rke/worker-and-generic/_index.md | 35 - .../v2.5/en/user-preferences/_index.md | 34 - content/rancher/v2.x/_index.md | 2 +- content/rancher/v2.x/en/_index.md | 11 +- .../tools/opa-gatekeper/_index.md | 114 ---- .../cluster-capabilities-table/index.md | 22 - .../hosted-kubernetes-clusters/_index.md | 2 +- .../hosted-kubernetes-clusters/eks/_index.md | 5 +- .../imported-clusters/_index.md | 4 +- .../cluster-provisioning/production/_index.md | 2 +- .../registering/_index.md | 4 + .../rke-clusters/_index.md | 2 +- content/rancher/v2.x/en/dashboard/_index.md | 4 + .../en/dashboard/backup-restore/_index.md | 4 + .../v2.x/en/dashboard/cis-scans/_index.md | 4 + .../en/dashboard/cluster-explorer/_index.md | 4 + .../rancher/v2.x/en/dashboard/istio/_index.md | 4 + .../en/dashboard}/logging/_index.md | 6 +- .../dashboard/monitoring-alerting}/_index.md | 60 +- .../rancher/v2.x/en/dashboard/opa/_index.md | 4 + .../rancher/v2.x/en/dashboard/repos/_index.md | 4 + .../k8s-install/helm-rancher/_index.md | 6 +- .../air-gap-helm2/install-rancher/_index.md | 6 +- .../populate-private-registry/_index.md | 4 +- .../options/chart-options/_index.md | 2 +- 
.../options/helm2/helm-rancher/_index.md | 2 +- .../helm-rancher/chart-options/_index.md | 2 +- .../helm2/helm-rancher/tls-secrets/_index.md | 4 +- .../options/tls-secrets/_index.md | 4 +- .../options/upgrading-cert-manager/_index.md | 28 +- .../helm-2-instructions/_index.md | 13 +- .../air-gap/install-rancher/_index.md | 11 +- .../populate-private-registry/_index.md | 4 +- .../requirements/installing-docker/_index.md | 6 +- .../en/security/benchmark-2.3.5/_index.md | 105 ++- .../v2.x/en/security/benchmark-2.4/_index.md | 90 ++- .../v2.x/en/security/hardening-2.3/_index.md | 18 +- 333 files changed, 308 insertions(+), 26590 deletions(-) delete mode 100644 content/rancher/v2.5/en/_index.md delete mode 100644 content/rancher/v2.5/en/api/_index.md delete mode 100644 content/rancher/v2.5/en/api/api-keys/_index.md delete mode 100644 content/rancher/v2.5/en/api/api-tokens/_index.md delete mode 100644 content/rancher/v2.5/en/backups/_index.md delete mode 100644 content/rancher/v2.5/en/backups/legacy/_index.md delete mode 100644 content/rancher/v2.5/en/backups/legacy/docker/_index.md delete mode 100644 content/rancher/v2.5/en/backups/legacy/docker/docker-backups/_index.md delete mode 100644 content/rancher/v2.5/en/backups/legacy/docker/docker-restore/_index.md delete mode 100644 content/rancher/v2.5/en/backups/legacy/special-scenarios/_index.md delete mode 100644 content/rancher/v2.5/en/cli/_index.md delete mode 100644 content/rancher/v2.5/en/cli/backups-etcd/_index.md delete mode 100644 content/rancher/v2.5/en/cli/disaster-recovery/_index.md delete mode 100644 content/rancher/v2.5/en/cli/rancher-k8s/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/certificate-rotation/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/certificates/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/cluster-autoscaler/_index.md delete mode 100644 
content/rancher/v2.5/en/cluster-explorer/cluster-autoscaler/amazon/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/configmaps/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/ingress/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/load-balancers/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/pipelines/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/pipelines/concepts/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/pipelines/config/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/pipelines/docs-for-v2.0.x/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/pipelines/example-repos/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/pipelines/example/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/pipelines/storage/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/pod-security-policy/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/namespaces/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/alerts/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/istio/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/logging/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/monitoring/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/_index.md 
delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/override-container-default/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/override-namespace-default/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/quota-type-reference/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/quotas-for-projects/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/registries/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/secrets/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/service-discovery/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/storage/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/storage/attaching-existing-storage/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/storage/examples/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/storage/examples/ebs/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/storage/examples/nfs/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/storage/examples/vsphere/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/storage/glusterfs-volumes/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/storage/how-storage-works/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/storage/iscsi-volumes/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/storage/provisioning-new-storage/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/workloads/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/workloads/add-a-sidecar/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/workloads/deploy-workloads/_index.md delete mode 100644 
content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/hpa-background/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/testing-hpa/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/workloads/rollback-workloads/_index.md delete mode 100644 content/rancher/v2.5/en/cluster-explorer/workloads/upgrade-workloads/_index.md delete mode 100644 content/rancher/v2.5/en/contributing/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/ad/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/azure-ad/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/freeipa/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/github/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/google/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/keycloak/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/local/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md delete mode 100644 
content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/rancher-adfs-setup/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/okta/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/openldap/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/openldap/openldap-config/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/ping-federate/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/shibboleth/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/shibboleth/about/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/authentication/user-groups/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/rbac/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/rbac/add-users-to-cluster/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/rbac/default-custom-roles/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/rbac/locked-roles/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/rbac/mcm/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/rbac/mcm/ace/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/rbac/mcm/cluster-project-roles/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/rbac/mcm/custom-global-roles/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/rbac/mcm/global-permissions/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/rbac/mcm/kubectl/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/access-control/rbac/mcm/project-members/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/backing-up-a-cluster/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/best-practices/_index.md delete mode 100644 
content/rancher/v2.5/en/ecm/best-practices/mcm/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/best-practices/mcm/architecture-recommendations/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/best-practices/mcm/containers/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/best-practices/mcm/deployment-strategies/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/best-practices/mcm/deployment-types/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/best-practices/mcm/management/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/best-practices/rancher-server/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/cloning-clusters/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/cluster-configuration/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/config-private-registry/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/disconnecting-nodes/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/drivers/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/drivers/cluster-drivers/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/drivers/node-drivers/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/feature-flags/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/feature-flags/enable-not-default-storage-drivers/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/feature-flags/istio-virtual-service-ui/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/globaldns/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/infrastructure/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/infrastructure/cloud-credentials/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/amazon/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/azure/_index.md delete mode 100644 
content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/gce/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/infrastructure/node-templates/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/infrastructure/nodes/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/k8s-metadata/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/pod-security-policies-in-projects/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/pod-security-policies/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/projects-and-namespaces/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/requirements/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/requirements/ports/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/restoring-from-backup/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/rke-templates/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/rke-templates/applying-templates/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/rke-templates/creating-and-revising/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/rke-templates/creator-permissions/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/rke-templates/enforcement/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/rke-templates/example-scenarios/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/rke-templates/example-yaml/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/rke-templates/overrides/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/rke-templates/rke-templates-and-hardware/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/rke-templates/template-access-and-sharing/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/cluster-capabilities-table/index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/_index.md delete mode 100644 
content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/ack/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/aks/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/cce/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/eks/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/gke/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/tke/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/eks/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/k3s/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/rancher-k8s/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/nodes-and-roles/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/recommended-architecture/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/cluster-config-reference/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/cluster-config-reference/pod-security-policies/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/custom-nodes/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/custom-nodes/agent-options/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/_index.md delete mode 100644 
content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/azure/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/digital-ocean/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/ec2/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/creating-credentials/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/enabling-uuids/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/node-template-reference/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/node-requirements/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/rancher-agents/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/host-gateway-requirements/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/upgrading-kubernetes/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/upgrading-mcm/_index.md delete mode 100644 content/rancher/v2.5/en/ecm/upgrading-mcm/rollbacks/_index.md delete mode 100644 content/rancher/v2.5/en/faq/_index.md delete mode 100644 
content/rancher/v2.5/en/faq/kubectl/_index.md delete mode 100644 content/rancher/v2.5/en/faq/mcm/networking/_index.md delete mode 100644 content/rancher/v2.5/en/faq/mcm/networking/cni-providers/_index.md delete mode 100644 content/rancher/v2.5/en/faq/mcm/removing-rancher/_index.md delete mode 100644 content/rancher/v2.5/en/faq/mcm/technical/_index.md delete mode 100644 content/rancher/v2.5/en/faq/security/_index.md delete mode 100644 content/rancher/v2.5/en/faq/telemetry/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/chart-options/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/choosing-version/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/helm-version/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/api-audit-log/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/arm64-platform/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/etcd/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/firewall/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/install-rancher/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/local-system-charts/_index.md delete mode 100644 
content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/populate-private-registry/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/prepare-registry/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/custom-ca-root-certificate/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/tls-secrets/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/tls-settings/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/upgrading-cert-manager/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/ha-RKE/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/ha-RKE2/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/how-ha-works/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md delete mode 100644 
content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/advanced/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/installing-docker/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/proxy/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/single-node-install-external-lb/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/upgrade/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-k8s/upgrade/rollbacks/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-linux/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-linux/install/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-linux/upgrade/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-linux/upgrade/cluster-upgrade/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-linux/upgrade/cluster-upgrade/woker-node-upgrade/_index.md delete mode 100644 content/rancher/v2.5/en/install-rancher-on-linux/upgrade/image-upgrade/_index.md delete mode 100644 content/rancher/v2.5/en/istio/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/disabling-istio/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/rbac/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/release-notes/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/resources/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/setup/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/setup/deploy-workloads/_index.md delete mode 100644 
content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-cluster/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-namespace/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/setup/gateway/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/setup/node-selectors/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/setup/set-up-traffic-management/_index.md delete mode 100644 content/rancher/v2.5/en/istio/legacy/setup/view-traffic/_index.md delete mode 100644 content/rancher/v2.5/en/logging/legacy/_index.md delete mode 100644 content/rancher/v2.5/en/logging/legacy/elasticsearch/_index.md delete mode 100644 content/rancher/v2.5/en/logging/legacy/fluentd/_index.md delete mode 100644 content/rancher/v2.5/en/logging/legacy/kafka/_index.md delete mode 100644 content/rancher/v2.5/en/logging/legacy/splunk/_index.md delete mode 100644 content/rancher/v2.5/en/logging/legacy/syslog/_index.md delete mode 100644 content/rancher/v2.5/en/longhorn-storage/_index.md delete mode 100644 content/rancher/v2.5/en/longhorn-storage/legacy/_index.md delete mode 100644 content/rancher/v2.5/en/managing-applications/_index.md delete mode 100644 content/rancher/v2.5/en/managing-applications/creating-apps/_index.md delete mode 100644 content/rancher/v2.5/en/managing-applications/creating-custom-libraries/_index.md delete mode 100644 content/rancher/v2.5/en/managing-applications/enabling-builtin-libraries/_index.md delete mode 100644 content/rancher/v2.5/en/managing-applications/launching-apps/_index.md delete mode 100644 content/rancher/v2.5/en/managing-applications/library-config/_index.md delete mode 100644 content/rancher/v2.5/en/managing-applications/managing-apps/_index.md delete mode 100644 content/rancher/v2.5/en/managing-applications/multi-cluster-apps/_index.md delete mode 100644 
content/rancher/v2.5/en/managing-applications/tutorial/_index.md delete mode 100644 content/rancher/v2.5/en/monitoring/legacy/_index.md delete mode 100644 content/rancher/v2.5/en/monitoring/legacy/alerts/_index.md delete mode 100644 content/rancher/v2.5/en/monitoring/legacy/alerts/default-alerts/_index.md delete mode 100644 content/rancher/v2.5/en/monitoring/legacy/alerts/expression/_index.md delete mode 100644 content/rancher/v2.5/en/monitoring/legacy/monitoring/_index.md delete mode 100644 content/rancher/v2.5/en/monitoring/legacy/monitoring/cluster-metrics/_index.md delete mode 100644 content/rancher/v2.5/en/monitoring/legacy/monitoring/custom-metrics/_index.md delete mode 100644 content/rancher/v2.5/en/monitoring/legacy/monitoring/prometheus/_index.md delete mode 100644 content/rancher/v2.5/en/monitoring/legacy/monitoring/viewing-metrics/_index.md delete mode 100644 content/rancher/v2.5/en/monitoring/legacy/notifiers/_index.md delete mode 100644 content/rancher/v2.5/en/opa-gatekeper/_index.md delete mode 100644 content/rancher/v2.5/en/opa-gatekeper/legacy/_index.md delete mode 100644 content/rancher/v2.5/en/overview/_index.md delete mode 100644 content/rancher/v2.5/en/overview/architecture/_index.md delete mode 100644 content/rancher/v2.5/en/overview/concepts/_index.md delete mode 100644 content/rancher/v2.5/en/security/_index.md delete mode 100644 content/rancher/v2.5/en/security/cve/_index.md delete mode 100644 content/rancher/v2.5/en/security/security-scan/_index.md delete mode 100644 content/rancher/v2.5/en/system-tools/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/docker/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/mcm/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/mcm/dns/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/mcm/imported-clusters/_index.md delete mode 100644 
content/rancher/v2.5/en/troubleshooting/mcm/logging/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/mcm/networking/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/rke/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/rke/controlplane/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/rke/etcd/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/rke/kubernetes-resources/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/rke/nginx-proxy/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/rke/rancher_rke/_index.md delete mode 100644 content/rancher/v2.5/en/troubleshooting/rke/worker-and-generic/_index.md delete mode 100644 content/rancher/v2.5/en/user-preferences/_index.md delete mode 100644 content/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table/index.md create mode 100644 content/rancher/v2.x/en/cluster-provisioning/registering/_index.md create mode 100644 content/rancher/v2.x/en/dashboard/_index.md create mode 100644 content/rancher/v2.x/en/dashboard/backup-restore/_index.md create mode 100644 content/rancher/v2.x/en/dashboard/cis-scans/_index.md create mode 100644 content/rancher/v2.x/en/dashboard/cluster-explorer/_index.md create mode 100644 content/rancher/v2.x/en/dashboard/istio/_index.md rename content/rancher/{v2.5/en => v2.x/en/dashboard}/logging/_index.md (94%) rename content/rancher/{v2.5/en/monitoring => v2.x/en/dashboard/monitoring-alerting}/_index.md (52%) create mode 100644 content/rancher/v2.x/en/dashboard/opa/_index.md create mode 100644 content/rancher/v2.x/en/dashboard/repos/_index.md diff --git a/content/rancher/v2.5/en/_index.md b/content/rancher/v2.5/en/_index.md deleted file mode 100644 index 84944368e11..00000000000 --- a/content/rancher/v2.5/en/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: "Rancher 2.5" -shortTitle: "Rancher 2.5" -description: "Rancher adds significant value on top of 
Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." -metaTitle: "Rancher 2.5 Docs: What is New?" -metaDescription: "Rancher 2 adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." -insertOneSix: false -weight: 1 -ctaBanner: 0 ---- - -> This page is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/api/_index.md b/content/rancher/v2.5/en/api/_index.md deleted file mode 100644 index f873346680c..00000000000 --- a/content/rancher/v2.5/en/api/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: API -weight: 19 ---- - -## How to use the API - -The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint as well as create [API keys]({{}}/rancher/v2.x/en/user-settings/api-keys/). - -## Authentication - -API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys]({{}}/rancher/v2.x/en/user-settings/api-keys/). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. - -By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page]({{}}/rancher/v2.x/en/api/api-tokens). 
- -## Making requests - -The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see here](https://github.com/rancher/api-spec/blob/master/specification.md). - -- Every type has a Schema which describes: - - The URL to get to the collection of this type of resources - - Every field the resource can have, along with their type, basic validation rules, whether they are required or optional, etc. - - Every action that is possible on this type of resource, with their inputs and outputs (also as schemas). - - Every field that filtering is allowed on - - What HTTP verb methods are available for the collection itself, or for individual resources in the collection. - - -- So the theory is that you can load just the list of schemas and know everything about the API. This is in fact how the UI for the API works, it contains no code specific to Rancher itself. The URL to get Schemas is sent in every HTTP response as a `X-Api-Schemas` header. From there you can follow the `collection` link on each schema to know where to list resources, and other `links` inside of the returned resources to get any other information. - -- In practice, you will probably just want to construct URL strings. We highly suggest limiting this to the top-level to list a collection (`/v3/`) or get a specific resource (`/v3//`). Anything deeper than that is subject to change in future releases. - -- Resources have relationships between each other called links. Each resource includes a map of `links` with the name of the link and the URL to retrieve that information. Again you should `GET` the resource and then follow the URL in the `links` map, not construct these strings yourself. - -- Most resources have actions, which do something or change the state of the resource. 
To use these, send a HTTP `POST` to the URL in the `actions` map for the action you want. Some actions require input or produce output, see the individual documentation for each type or the schemas for specific information. - -- To edit a resource, send a HTTP `PUT` to the `links.update` link on the resource with the fields that you want to change. If the link is missing then you don't have permission to update the resource. Unknown fields and ones that are not editable are ignored. - -- To delete a resource, send a HTTP `DELETE` to the `links.remove` link on the resource. If the link is missing then you don't have permission to update the resource. - -- To create a new resource, HTTP `POST` to the collection URL in the schema (which is `/v3/`). - -## Filtering - -Most collections can be filtered on the server-side by common fields using HTTP query parameters. The `filters` map shows you what fields can be filtered on and what the filtered values were for the request you made. The API UI has controls to setup filtering and show you the appropriate request. For simple "equals" matches it's just `field=value`. Modifiers can be added to the field name, e.g. `field_gt=42` for "field is greater than 42". See the [API spec](https://github.com/rancher/api-spec/blob/master/specification.md#filtering) for full details. - -## Sorting - -Most collections can be sorted on the server-side by common fields using HTTP query parameters. The `sortLinks` map shows you what sorts are available, along with the URL to get the collection sorted by that. It also includes info about what the current response was sorted by, if specified. - -## Pagination - -API responses are paginated with a limit of 100 resources per page by default. This can be changed with the `limit` query parameter, up to a maximum of 1000, e.g. `/v3/pods?limit=1000`. The `pagination` map in collection responses tells you whether or not you have the full result set and has a link to the next page if you do not. 
diff --git a/content/rancher/v2.5/en/api/api-keys/_index.md b/content/rancher/v2.5/en/api/api-keys/_index.md deleted file mode 100644 index 1075ef67e6f..00000000000 --- a/content/rancher/v2.5/en/api/api-keys/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: API Keys -weight: 3 ---- - -If you want to access your Rancher clusters, projects, or other objects using external applications, you can do so using the Rancher API. However, before your application can access the API, you must provide the app with a key used to authenticate with Rancher. You can obtain a key using the Rancher UI. - -An API key is also required for using Rancher CLI. - -API Keys are composed of four components: - -- **Endpoint:** This is the IP address and path that other applications use to send requests to the Rancher API. -- **Access Key:** The token's username. -- **Secret Key:** The token's password. For applications that prompt you for two different strings for API authentication, you usually enter the two keys together. -- **Bearer Token:** The token username and password concatenated together. Use this string for applications that prompt you for one authentication string. - -## Creating an API Key - -1. Select **User Avatar** > **API & Keys** from the **User Settings** menu in the upper-right. - -2. Click **Add Key**. - -3. **Optional:** Enter a description for the API key and select an expiration period or a scope. We recommend setting an expiration date. - - The API key won't be valid after expiration. Shorter expiration periods are more secure. - - A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) for more information. - -4. 
Click **Create**. - - **Step Result:** Your API Key is created. Your API **Endpoint**, **Access Key**, **Secret Key**, and **Bearer Token** are displayed. - - Use the **Bearer Token** to authenticate with Rancher CLI. - -5. Copy the information displayed to a secure location. This information is only displayed once, so if you lose your key, you'll have to make a new one. - -## What's Next? - -- Enter your API key information into the application that will send requests to the Rancher API. -- Learn more about the Rancher endpoints and parameters by selecting **View in API** for an object in the Rancher UI. -- API keys are used for API calls and [Rancher CLI]({{}}/rancher/v2.x/en/cli). - -## Deleting API Keys - -If you need to revoke an API key, delete it. You should delete API keys: - -- That may have been compromised. -- That have expired. - -To delete an API, select the stale key and click **Delete**. diff --git a/content/rancher/v2.5/en/api/api-tokens/_index.md b/content/rancher/v2.5/en/api/api-tokens/_index.md deleted file mode 100644 index eb823bb7cea..00000000000 --- a/content/rancher/v2.5/en/api/api-tokens/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: API Tokens -weight: 1 ---- - -By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. Tokens are not invalidated by changing a password. - -You can deactivate API tokens by deleting them or by deactivating the user account. - -To delete a token, - -1. Go to the list of all tokens in the Rancher API view at `https:///v3/tokens`. - -1. Access the token you want to delete by its ID. For example, `https:///v3/tokens/kubectl-shell-user-vqkqt` - -1. 
Click **Delete.** - -Here is the complete list of tokens that are generated with `ttl=0`: - -| Token | Description | -|-------|-------------| -| `kubeconfig-*` | Kubeconfig token | -| `kubectl-shell-*` | Access to `kubectl` shell in the browser | -| `agent-*` | Token for agent deployment | -| `compose-token-*` | Token for compose | -| `helm-token-*` | Token for Helm chart deployment | -| `*-pipeline*` | Pipeline token for project | -| `telemetry-*` | Telemetry token | -| `drain-node-*` | Token for drain (we use `kubectl` for drain because there is no native Kubernetes API) | diff --git a/content/rancher/v2.5/en/backups/_index.md b/content/rancher/v2.5/en/backups/_index.md deleted file mode 100644 index 595fdeb458f..00000000000 --- a/content/rancher/v2.5/en/backups/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Backups and Disaster Recovery -weight: 8 ---- - -> This section is under construction. - -This section is devoted to protecting your data in a disaster scenario. - -To protect yourself from a disaster scenario, you should create backups on a regular basis. - -We recommend using the backup/restore application to back up Rancher and to restore it from backup. - -The Helm chart for the application is available as in Rancher. After you have enabled the application, you will be able to use backup templates for Rancher, Fleet, and the Enterprise Cluster Manager. diff --git a/content/rancher/v2.5/en/backups/legacy/_index.md b/content/rancher/v2.5/en/backups/legacy/_index.md deleted file mode 100644 index 3845dfe3d96..00000000000 --- a/content/rancher/v2.5/en/backups/legacy/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Legacy Backup and Restore Docs -weight: 2 ---- - -> This section is under construction. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/backups/legacy/docker/_index.md b/content/rancher/v2.5/en/backups/legacy/docker/_index.md deleted file mode 100644 index 3306f194e62..00000000000 --- a/content/rancher/v2.5/en/backups/legacy/docker/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Rancher Installed with Docker -weight: 4 ---- - -> This section is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/backups/legacy/docker/docker-backups/_index.md b/content/rancher/v2.5/en/backups/legacy/docker/docker-backups/_index.md deleted file mode 100644 index 97825f9069a..00000000000 --- a/content/rancher/v2.5/en/backups/legacy/docker/docker-backups/_index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Backing up Rancher Installed with Docker -weight: 4 ---- - -After completing your Docker installation of Rancher, we recommend creating backups of it on a regular basis. Having a recent backup will let you recover quickly from an unexpected disaster. - -## Before You Start - -During the creation of your backup, you'll enter a series of commands, replacing placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker run --volumes-from rancher-data- -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher -``` - -In this command, `` is a placeholder for the date that the data container and backup were created. `9-27-18` for example. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the [procedure below](#creating-a-backup). 
- -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version of Rancher that you're creating a backup for. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped with `docker ps -a`. Use these commands for help anytime while creating backups. - -## Creating a Backup - -This procedure creates a backup that you can restore if Rancher encounters a disaster scenario. - - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `` with the [name of your Rancher container](#before-you-start). - - ``` - docker stop - ``` -1. Use the command below, replacing each [placeholder](#before-you-start), to create a data container from the Rancher container that you just stopped. - - ``` - docker create --volumes-from --name rancher-data- rancher/rancher: - ``` - -1. From the data container that you just created (`rancher-data-`), create a backup tarball (`rancher-data-backup--.tar.gz`). Use the following command, replacing each [placeholder](#before-you-start). - - ``` - docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher - ``` - - **Step Result:** A stream of commands runs on the screen. - -1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Move your backup tarball to a safe location external to your Rancher Server. Then delete the `rancher-data-` container from your Rancher Server. - -1. Restart Rancher Server. Replace `` with the name of your [Rancher container](#before-you-start). - - ``` - docker start - ``` - -**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs]({{}}/rancher/v2.x/en/backups/restorations/single-node-restoration) if you need to restore backup data. 
diff --git a/content/rancher/v2.5/en/backups/legacy/docker/docker-restore/_index.md b/content/rancher/v2.5/en/backups/legacy/docker/docker-restore/_index.md deleted file mode 100644 index 18941da7d84..00000000000 --- a/content/rancher/v2.5/en/backups/legacy/docker/docker-restore/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Restoring Rancher Installed with Docker -weight: 3 ---- - -If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup. - -## Before You Start - -When restoring your backup, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker run --volumes-from -v $PWD:/backup \ -busybox sh -c "rm /var/lib/rancher/* -rf && \ -tar pzxvf /backup/rancher-data-backup--" -``` - -In this command, `` and `-` are environment variables for your Rancher deployment. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the [procedure below](#creating-a-backup). - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version number for your Rancher backup. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -## Restoring Backups - -Using a [backup]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/) that you created earlier, restore Rancher to its last known healthy state. - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `` with the [name of your Rancher container](#before-you-start). - - ``` - docker stop - ``` -1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - - If you followed the naming convention we suggested in [Creating Backups—Docker Installs]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Enter the following command to delete your current state data and replace it with your backup data, replacing the [placeholders](#before-you-start). Don't forget to close the quotes. - - >**Warning!** This command deletes all current state data from your Rancher Server container. Any changes saved after your backup tarball was created will be lost. - - ``` - docker run --volumes-from -v $PWD:/backup \ - busybox sh -c "rm /var/lib/rancher/* -rf && \ - tar pzxvf /backup/rancher-data-backup--.tar.gz" - ``` - - **Step Result:** A series of commands should run. - -1. Restart your Rancher Server container, replacing the [placeholder](#before-you-start). It will restart using your backup data. - - ``` - docker start - ``` - -1. 
Wait a few moments and then open Rancher in a web browser. Confirm that the restoration succeeded and that your data is restored. diff --git a/content/rancher/v2.5/en/backups/legacy/special-scenarios/_index.md b/content/rancher/v2.5/en/backups/legacy/special-scenarios/_index.md deleted file mode 100644 index 889376b492a..00000000000 --- a/content/rancher/v2.5/en/backups/legacy/special-scenarios/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Special Scenarios for Rollbacks -weight: 40 ---- - -If you are rolling back to versions in either of these scenarios, you must follow some extra instructions in order to get your clusters working. - -- Rolling back from v2.1.6+ to any version between v2.1.0 - v2.1.5 or v2.0.0 - v2.0.10. -- Rolling back from v2.0.11+ to any version between v2.0.0 - v2.0.10. - -Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321), special steps are necessary if the user wants to roll back to a previous version of Rancher where this vulnerability exists. The steps are as follows: - -1. Record the `serviceAccountToken` for each cluster. To do this, save the following script on a machine with `kubectl` access to the Rancher management plane and execute it. You will need to run these commands on the machine where the rancher container is running. Ensure JQ is installed before running the command. The commands will vary depending on how you installed Rancher. - - - **Rancher Installed with Docker** - ``` - docker exec kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json - ``` - - **Rancher Installed on a Kubernetes Cluster** - ``` - kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json - ``` - -2. 
After executing the command, a `tokens.json` file will be created. **Important! Back up this file in a safe place.** You will need it to restore functionality to your clusters after rolling back Rancher. **If you lose this file, you may lose access to your clusters.** - -3. Roll back Rancher following the [normal instructions]({{}}/rancher/v2.x/en/upgrades/rollbacks/).
- ``` - set -e - - tokens=$(jq .[] -c tokens.json) - for token in $tokens; do - name=$(echo $token | jq -r .name) - value=$(echo $token | jq -r .token) - - kubectl patch --type=merge clusters $name -p "{\"status\": {\"serviceAccountToken\": \"$value\"}}" - done - ``` - Set the script to allow execution (`chmod +x apply_tokens.sh`) and execute the script as follows: - ``` - ./apply_tokens.sh - ``` - After a few moments the clusters will go from `Unavailable` back to `Available`. - -6. Continue using Rancher as normal. diff --git a/content/rancher/v2.5/en/cli/_index.md b/content/rancher/v2.5/en/cli/_index.md deleted file mode 100644 index 6dafdd1aedd..00000000000 --- a/content/rancher/v2.5/en/cli/_index.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: The Rancher Command Line Interface -description: The Rancher CLI is a unified tool that you can use to interact with Rancher. With it, you can operate Rancher using a command line interface rather than the GUI -metaTitle: "Using the Rancher Command Line Interface " -metaDescription: "The Rancher CLI is a unified tool that you can use to interact with Rancher. With it, you can operate Rancher using a command line interface rather than the GUI" -weight: 16 ---- - -The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. - -### Download Rancher CLI - -The binary can be downloaded directly from the UI. The link can be found in the right hand side of the footer in the UI. We have binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. - -### Requirements - -After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - -- Your [Rancher Server URL]({{}}/rancher/v2.x/en/admin-settings/server-url), which is used to connect to Rancher Server. 
-- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key]({{}}/rancher/v2.x/en/user-settings/api-keys/). - -### CLI Authentication - -Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): - -```bash -$ ./rancher login https:// --token -``` - -If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. - -### Project Selection - -Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. - -**Example: `./rancher context switch` Output** -``` -User:rancher-cli-directory user$ ./rancher context switch -NUMBER CLUSTER NAME PROJECT ID PROJECT NAME -1 cluster-2 c-7q96s:p-h4tmb project-2 -2 cluster-2 c-7q96s:project-j6z6d Default -3 cluster-1 c-lchzv:p-xbpdt project-1 -4 cluster-1 c-lchzv:project-s2mch Default -Select a Project: -``` - -After you enter a number, the console displays a message that you've changed projects. - -``` -INFO[0005] Setting new context to project project-1 -INFO[0005] Saving config to /Users/markbishop/.rancher/cli2.json -``` - -### Commands - -The following commands are available for use in Rancher CLI. - -| Command | Result | -|---|---| -| `apps, [app]` | Performs operations on catalog applications (i.e. individual [Helm charts](https://docs.helm.sh/developing_charts/) or [Rancher charts]({{}}/rancher/v2.x/en/catalog/custom/#chart-directory-structure)). | -| `catalog` | Performs operations on [catalogs]({{}}/rancher/v2.x/en/catalog/). 
| -| `clusters, [cluster]` | Performs operations on your [clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). | -| `context` | Switches between Rancher [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). For an example, see [Project Selection](#project-selection). | -| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) and [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/)). Specify resources by name or ID. | -| `kubectl` |Runs [kubectl commands](https://kubernetes.io/docs/reference/kubectl/overview/#operations). | -| `login, [l]` | Logs into a Rancher Server. For an example, see [CLI Authentication](#cli-authentication). | -| `namespaces, [namespace]` |Performs operations on [namespaces]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). | -| `nodes, [node]` |Performs operations on [nodes]({{}}/rancher/v2.x/en/overview/architecture/#kubernetes). | -| `projects, [project]` | Performs operations on [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). | -| `ps` | Displays [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads) in a project. | -| `settings, [setting]` | Shows the current settings for your Rancher Server. | -| `ssh` | Connects to one of your cluster nodes using the SSH protocol. | -| `help, [h]` | Shows a list of commands or help for one command. | - - -### Rancher CLI Help - -Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. - -All commands accept the `--help` flag, which documents each command's usage. 
diff --git a/content/rancher/v2.5/en/cli/backups-etcd/_index.md b/content/rancher/v2.5/en/cli/backups-etcd/_index.md deleted file mode 100644 index 2c191c772ab..00000000000 --- a/content/rancher/v2.5/en/cli/backups-etcd/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Backups - Etcd snapshot -weight: 1 ---- - -The Rancher Kubernetes cluster can be restored from an etcd snapshot. - -If Rancher was installed on another type of Kubernetes, refer to the official documentation of the Kubernetes distribution for more information about backing up the cluster. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cli/disaster-recovery/_index.md b/content/rancher/v2.5/en/cli/disaster-recovery/_index.md deleted file mode 100644 index 6662f7bda76..00000000000 --- a/content/rancher/v2.5/en/cli/disaster-recovery/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Disaster Recovery - Etcd Restore Snapshot -weight: 1 ---- - -The Rancher Kubernetes cluster can be restored from an etcd snapshot. - -If Rancher was installed on another type of Kubernetes, refer to the official documentation of the Kubernetes distribution for more information about backing up the cluster. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cli/rancher-k8s/_index.md b/content/rancher/v2.5/en/cli/rancher-k8s/_index.md deleted file mode 100644 index acc0615f314..00000000000 --- a/content/rancher/v2.5/en/cli/rancher-k8s/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Rancher Kubernetes -weight: 1 ---- - -> This page is under construction. - -The Rancher CLI comes with a Kubernetes distribution called Rancher Kubernetes, which allows you to set up a Kubernetes cluster more easily as a prerequisite to installing Rancher. - -Rancher Kubernetes is based on K3s, and has more secure default settings. It is a new feature in Rancher 2.5. - -Rancher Kubernetes clusters can also be imported into Rancher. 
- -Rancher Kubernetes is not to be confused with RKE Kubernetes or K3s Kubernetes, which are separate Kubernetes distributions provided by Rancher. RKE is the oldest of the three distributions. When the Enterprise Cluster Manager is enabled, Rancher can provision RKE Kubernetes clusters, but Rancher Kubernetes clusters and K3s Kubernetes clusters have to be installed separately and imported into Rancher. - -In other words, Rancher can only install Rancher Kubernetes when you are using the Rancher CLI to set up a local Kubernetes cluster for the Rancher server. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/_index.md b/content/rancher/v2.5/en/cluster-explorer/_index.md deleted file mode 100644 index dc57d68980d..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/_index.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Cluster Explorer -weight: 5 ---- - -> This section is under construction. - -After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. - -This page covers the following topics: - -- [Switching between clusters](#switching-between-clusters) -- [Managing clusters in Rancher](#managing-clusters-in-rancher) -- [Configuring tools](#configuring-tools) - -> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.x/en/overview/concepts) page. - -## Switching between Clusters - -To switch between clusters, use the drop-down available in the navigation bar. - -Alternatively, you can switch between projects and clusters directly in the navigation bar. Open the **Global** view and select **Clusters** from the main menu. Then select the name of the cluster you want to open. 
- -## Managing Clusters in Rancher - -After clusters have been [provisioned into Rancher]({{}}/rancher/v2.x/en/cluster-provisioning/), [cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. - -{{% include file="/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table" %}} - -## Configuring Tools - -Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: - -- Alerts -- Notifiers -- Logging -- Monitoring -- Istio Service Mesh -- OPA Gatekeeper - -For more information, see [Tools]({{}}/rancher/v2.x/en/cluster-admin/tools/) - - - -When your project is set up, [project members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can start managing their applications and all the components that comprise it. - -## Workloads - -Deploy applications to your cluster nodes using [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/), which are objects that contain pods that run your apps, along with metadata that set rules for the deployment's behavior. Workloads can be deployed within the scope of the entire clusters or within a namespace. - -When deploying a workload, you can deploy from any image. There are a variety of [workload types]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/#workload-types) to choose from which determine how your application should run. - -Following a workload deployment, you can continue working with it. You can: - -- [Upgrade]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads) the workload to a newer version of the application it's running. 
-- [Roll back]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads) a workload to a previous version, if an issue occurs during upgrade. -- [Add a sidecar]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar), which is a workload that supports a primary workload. - -## Load Balancing and Ingress - -### Load Balancers - -After you launch an application, it's only available within the cluster. It can't be reached externally. - -If you want your applications to be externally accessible, you must add a load balancer to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. - -Rancher supports two types of load balancers: - -- [Layer-4 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) - -For more information, see [load balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). - -#### Ingress - -Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiples load balancers can be expensive. You can get around this issue by using an ingress. - -Ingress is a set or rules that act as a load balancer. Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured. - -For more information, see [Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). 
- -When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. - -For more information, see [Global DNS]({{}}/rancher/v2.x/en/catalog/globaldns/). - -## Service Discovery - -After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolveable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labelled pods to a specific hostname. - -For more information, see [Service Discovery]({{}}/rancher/v2.x/en/k8s-in-rancher/service-discovery). - -## Pipelines - -After your project has been [configured to a version control provider]({{}}/rancher/v2.x/en/project-admin/pipelines/#version-control-providers), you can add the repositories and start configuring a pipeline for each repository. - -For more information, see [Pipelines]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/). - -## Applications - -Besides launching individual components of an application, you can use the Rancher catalog to start launching applications, which are Helm charts. - -For more information, see [Applications in a Project]({{}}/rancher/v2.x/en/catalog/apps/). - -## Kubernetes Resources - -Within the context of a Rancher project or namespace, _resources_ are files and data that support operation of your pods. Within Rancher, certificates, registries, and secrets are all considered resources. However, Kubernetes classifies resources as different types of [secrets](https://kubernetes.io/docs/concepts/configuration/secret/). Therefore, within a single project or namespace, individual resources must have unique names to avoid conflicts. Although resources are primarily used to carry sensitive information, they have other uses as well. 
- -Resources include: - -- [Certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/): Files used to encrypt/decrypt data entering or leaving the cluster. -- [ConfigMaps]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/): Files that store general configuration information, such as a group of config files. -- [Secrets]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/): Files that store sensitive data like passwords, tokens, or keys. -- [Registries]({{}}/rancher/v2.x/en/k8s-in-rancher/registries/): Files that carry credentials used to authenticate with private registries. diff --git a/content/rancher/v2.5/en/cluster-explorer/certificate-rotation/_index.md b/content/rancher/v2.5/en/cluster-explorer/certificate-rotation/_index.md deleted file mode 100644 index 8b2d81f49e9..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/certificate-rotation/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Certificate Rotation -weight: 2 ---- - -> **Warning:** Rotating Kubernetes certificates may result in your cluster being temporarily unavailable as components are restarted. For production environments, it's recommended to perform this action during a maintenance window. - -By default, Kubernetes clusters require certificates and Rancher launched Kubernetes clusters automatically generate certificates for the Kubernetes components. Rotating these certificates is important before the certificates expire as well as if a certificate is compromised. After the certificates are rotated, the Kubernetes components are automatically restarted. - -Certificates can be rotated for the following services: - -- etcd -- kubelet -- kube-apiserver -- kube-proxy -- kube-scheduler -- kube-controller-manager - - -### Certificate Rotation - -Rancher launched Kubernetes clusters have the ability to rotate the auto-generated certificates through the UI. - -1. In the **Global** view, navigate to the cluster that you want to rotate certificates. - -2. Select the **⋮ > Rotate Certificates**. 
- -3. Select which certificates that you want to rotate. - - * Rotate all Service certificates (keep the same CA) - * Rotate an individual service and choose one of the services from the drop down menu - -4. Click **Save**. - -**Results:** The selected certificates will be rotated and the related services will be restarted to start using the new certificate. - -> **Note:** Even though the RKE CLI can use custom certificates for the Kubernetes cluster components, Rancher currently doesn't allow the ability to upload these in Rancher Launched Kubernetes clusters. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/certificates/_index.md b/content/rancher/v2.5/en/cluster-explorer/certificates/_index.md deleted file mode 100644 index 56a660ad63d..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/certificates/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Encrypting HTTP Communication -description: Learn how to add an SSL (Secure Sockets Layer) certificate or TLS (Transport Layer Security) certificate to either a project, a namespace, or both, so that you can add it to deployments -weight: 1 ---- - -When you create an ingress within Rancher/Kubernetes, you must provide it with a secret that includes a TLS private key and certificate, which are used to encrypt and decrypt communications that come through the ingress. You can make certificates available for ingress use by navigating to its project or namespace, and then uploading the certificate. You can then add the certificate to the ingress deployment. - -Add SSL certificates to either projects, namespaces, or both. A project scoped certificate will be available in all its namespaces. - ->**Prerequisites:** You must have a TLS private key and certificate available to upload. - -1. From the **Global** view, select the project where you want to deploy your ingress. - -1. From the main menu, select **Resources > Secrets > Certificates**. Click **Add Certificate**. 
(For Rancher prior to v2.3, click **Resources > Certificates.**) - -1. Enter a **Name** for the certificate. - - >**Note:** Kubernetes classifies SSL certificates as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your SSL certificate must have a unique name among the other certificates, registries, and secrets within your project/workspace. - -1. Select the **Scope** of the certificate. - - - **Available to all namespaces in this project:** The certificate is available for any deployment in any namespaces in the project. - - - **Available to a single namespace:** The certificate is only available for the deployments in one [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). If you choose this option, select a **Namespace** from the drop-down list or click **Add to a new namespace** to add the certificate to a namespace you create on the fly. - -1. From **Private Key**, either copy and paste your certificate's private key into the text box (include the header and footer), or click **Read from a file** to browse to the private key on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. - - Private key files end with an extension of `.key`. - -1. From **Certificate**, either copy and paste your certificate into the text box (include the header and footer), or click **Read from a file** to browse to the certificate on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. - - Certificate files end with an extension of `.crt`. - -**Result:** Your certificate is added to the project or namespace. You can now add it to deployments. - -- If you added an SSL certificate to the project, the certificate is available for deployments created in any project namespace. 
-- If you added an SSL certificate to a namespace, the certificate is available only for deployments in that namespace. -- Your certificate is added to the **Resources > Secrets > Certificates** view. (For Rancher prior to v2.3, it is added to **Resources > Certificates.**) - -## What's Next? - -Now you can add the certificate when launching an ingress within the current project or namespace. For more information, see [Adding Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/). diff --git a/content/rancher/v2.5/en/cluster-explorer/cluster-autoscaler/_index.md b/content/rancher/v2.5/en/cluster-explorer/cluster-autoscaler/_index.md deleted file mode 100644 index 4f1d2e14430..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/cluster-autoscaler/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Cluster Autoscaler -weight: 1 ---- - -In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. - -The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: - -* There are pods that failed to run in the cluster due to insufficient resources. -* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. - -To prevent your pod from being evicted, set a `priorityClassName: system-cluster-critical` property on your pod spec. - -Cluster Autoscaler is designed to run on Kubernetes master nodes. It can run in the `kube-system` namespace. Cluster Autoscaler doesn't scale down nodes with non-mirrored `kube-system` pods running on them. - -It's possible to run a customized deployment of Cluster Autoscaler on worker nodes, but extra care needs to be taken to ensure that Cluster Autoscaler remains up and running. 
- -# Cloud Providers - -Cluster Autoscaler provides support to distinct cloud providers. For more information, go to [cluster-autoscaler supported cloud providers.](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#deployment) - -### Setting up Cluster Autoscaler on Amazon Cloud Provider - -For details on running the cluster autoscaler on Amazon cloud provider, refer to [this page.]({{}}/rancher/v2.x/en/cluster-admin/cluster-autoscaler/amazon) diff --git a/content/rancher/v2.5/en/cluster-explorer/cluster-autoscaler/amazon/_index.md b/content/rancher/v2.5/en/cluster-explorer/cluster-autoscaler/amazon/_index.md deleted file mode 100644 index d173d444e27..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/cluster-autoscaler/amazon/_index.md +++ /dev/null @@ -1,580 +0,0 @@ ---- -title: Cluster Autoscaler with AWS EC2 Auto Scaling Groups -weight: 1 ---- - -This guide will show you how to install and use [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. - -We are going to install a Rancher RKE custom cluster with a fixed number of nodes with the etcd and controlplane roles, and a variable nodes with the worker role, managed by `cluster-autoscaler`. - -- [Prerequisites](#prerequisites) -- [1. Create a Custom Cluster](#1-create-a-custom-cluster) -- [2. Configure the Cloud Provider](#2-configure-the-cloud-provider) -- [3. Deploy Nodes](#3-deploy-nodes) -- [4. Install cluster-autoscaler](#4-install-cluster-autoscaler) - - [Parameters](#parameters) - - [Deployment](#deployment) -- [Testing](#testing) - - [Generating Load](#generating-load) - - [Checking Scale](#checking-scale) - -# Prerequisites - -These elements are required to follow this guide: - -* The Rancher server is up and running -* You have an AWS EC2 user with proper permissions to create virtual machines, auto scaling groups, and IAM profiles and roles - -### 1. 
Create a Custom Cluster - -On Rancher server, we should create a custom k8s cluster v1.18.x. Be sure that cloud_provider name is set to `amazonec2`. Once cluster is created we need to get: - -* clusterID: `c-xxxxx` will be used on EC2 `kubernetes.io/cluster/` instance tag -* clusterName: will be used on EC2 `k8s.io/cluster-autoscaler/` instance tag -* nodeCommand: will be added on EC2 instance user_data to include new nodes on cluster - - ```sh - sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum - ``` - -### 2. Configure the Cloud Provider - -On AWS EC2, we should create a few objects to configure our system. We've defined three distinct groups and IAM profiles to configure on AWS. - -1. Autoscaling group: Nodes that will be part of the EC2 Auto Scaling Group (ASG). The ASG will be used by `cluster-autoscaler` to scale up and down. - * IAM profile: Required by k8s nodes where cluster-autoscaler will be running. It is recommended for Kubernetes master nodes. This profile is called `K8sAutoscalerProfile`. - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "autoscaling:DescribeTags", - "autoscaling:DescribeLaunchConfigurations", - "ec2:DescribeLaunchTemplateVersions" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - -2. Master group: Nodes that will be part of the Kubernetes etcd and/or control planes. This will be out of the ASG. - * IAM profile: Required by the Kubernetes cloud_provider integration. 
Optionally, `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` can be used instead [using-aws-credentials.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials) This profile is called `K8sMasterProfile`. - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - 
"elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - - * IAM role: `K8sMasterRole: [K8sMasterProfile,K8sAutoscalerProfile]` - * Security group: `K8sMasterSg` More info at[RKE ports (custom nodes tab)]({{}}/rancher/v2.x/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) - * Tags: - `kubernetes.io/cluster/: owned` - * User data: `K8sMasterUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add etcd+controlplane node to the k8s cluster - - ```sh - #!/bin/bash -x - - cat < /etc/sysctl.d/90-kubelet.conf - vm.overcommit_memory = 1 - vm.panic_on_oom = 0 - kernel.panic = 10 - kernel.panic_on_oops = 1 - kernel.keys.root_maxkeys = 1000000 - kernel.keys.root_maxbytes = 25000000 - EOF - sysctl -p /etc/sysctl.d/90-kubelet.conf - - curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh - sudo usermod -aG docker ubuntu - - TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) - PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) - K8S_ROLES="--etcd --controlplane" - - sudo docker run -d 
--privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} - ``` - -3. Worker group: Nodes that will be part of the k8s worker plane. Worker nodes will be scaled by cluster-autoscaler using the ASG. - * IAM profile: Provides cloud_provider worker integration. - This profile is called `K8sWorkerProfile`. - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } - ] - } - ``` - - * IAM role: `K8sWorkerRole: [K8sWorkerProfile]` - * Security group: `K8sWorkerSg` More info at [RKE ports (custom nodes tab)]({{}}/rancher/v2.x/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) - * Tags: - * `kubernetes.io/cluster/: owned` - * `k8s.io/cluster-autoscaler/: true` - * `k8s.io/cluster-autoscaler/enabled: true` - * User data: `K8sWorkerUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add worker node to the k8s cluster - - ```sh - #!/bin/bash -x - - cat < /etc/sysctl.d/90-kubelet.conf - vm.overcommit_memory = 1 - vm.panic_on_oom = 0 - kernel.panic = 10 - kernel.panic_on_oops = 1 - kernel.keys.root_maxkeys = 1000000 - kernel.keys.root_maxbytes = 25000000 - EOF - sysctl -p /etc/sysctl.d/90-kubelet.conf - - curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh - sudo usermod -aG docker ubuntu - - TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) - PUBLIC_IP=$(curl 
-H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) - K8S_ROLES="--worker" - - sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} - ``` - -More info is at [RKE clusters on AWS]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/) and [Cluster Autoscaler on AWS.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) - -### 3. Deploy Nodes - -Once we've configured AWS, let's create VMs to bootstrap our cluster: - -* master (etcd+controlplane): Depending your needs, deploy three master instances with proper size. More info is at [the recommendations for production-ready clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/production/) - * IAM role: `K8sMasterRole` - * Security group: `K8sMasterSg` - * Tags: - * `kubernetes.io/cluster/: owned` - * User data: `K8sMasterUserData` - -* worker: Define an ASG on EC2 with the following settings: - * Name: `K8sWorkerAsg` - * IAM role: `K8sWorkerRole` - * Security group: `K8sWorkerSg` - * Tags: - * `kubernetes.io/cluster/: owned` - * `k8s.io/cluster-autoscaler/: true` - * `k8s.io/cluster-autoscaler/enabled: true` - * User data: `K8sWorkerUserData` - * Instances: - * minimum: 2 - * desired: 2 - * maximum: 10 - -Once the VMs are deployed, you should have a Rancher custom cluster up and running with three master and two worker nodes. - -### 4. Install Cluster-autoscaler - -At this point, we should have rancher cluster up and running. We are going to install cluster-autoscaler on master nodes and `kube-system` namespace, following cluster-autoscaler recommendation. 
- -#### Parameters - -This table shows cluster-autoscaler parameters for fine tuning: - -| Parameter | Default | Description | -|---|---|---| -|cluster-name|-|Autoscaled cluster name, if available| -|address|:8085|The address to expose Prometheus metrics| -|kubernetes|-|Kubernetes master location. Leave blank for default| -|kubeconfig|-|Path to kubeconfig file with authorization and master location information| -|cloud-config|-|The path to the cloud provider configuration file. Empty string for no configuration file| -|namespace|"kube-system"|Namespace in which cluster-autoscaler run| -|scale-down-enabled|true|Should CA scale down the cluster| -|scale-down-delay-after-add|"10m"|How long after scale up that scale down evaluation resumes| -|scale-down-delay-after-delete|0|How long after node deletion that scale down evaluation resumes, defaults to scanInterval| -|scale-down-delay-after-failure|"3m"|How long after scale down failure that scale down evaluation resumes| -|scale-down-unneeded-time|"10m"|How long a node should be unneeded before it is eligible for scale down| -|scale-down-unready-time|"20m"|How long an unready node should be unneeded before it is eligible for scale down| -|scale-down-utilization-threshold|0.5|Sum of cpu or memory of all pods running on the node divided by node's corresponding allocatable resource, below which a node can be considered for scale down| -|scale-down-gpu-utilization-threshold|0.5|Sum of gpu requests of all pods running on the node divided by node's allocatable resource, below which a node can be considered for scale down| -|scale-down-non-empty-candidates-count|30|Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain| -|scale-down-candidates-pool-ratio|0.1|A ratio of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| -|scale-down-candidates-pool-min-count|50|Minimum number of nodes that are 
considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| -|node-deletion-delay-timeout|"2m"|Maximum time CA waits for removing delay-deletion.cluster-autoscaler.kubernetes.io/ annotations before deleting the node| -|scan-interval|"10s"|How often cluster is reevaluated for scale up or down| -|max-nodes-total|0|Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number| -|cores-total|"0:320000"|Minimum and maximum number of cores in cluster, in the format :. Cluster autoscaler will not scale the cluster beyond these numbers| -|memory-total|"0:6400000"|Minimum and maximum number of gigabytes of memory in cluster, in the format :. Cluster autoscaler will not scale the cluster beyond these numbers| -cloud-provider|-|Cloud provider type| -|max-bulk-soft-taint-count|10|Maximum number of nodes that can be tainted/untainted PreferNoSchedule at the same time. Set to 0 to turn off such tainting| -|max-bulk-soft-taint-time|"3s"|Maximum duration of tainting/untainting nodes as PreferNoSchedule at the same time| -|max-empty-bulk-delete|10|Maximum number of empty nodes that can be deleted at the same time| -|max-graceful-termination-sec|600|Maximum number of seconds CA waits for pod termination when trying to scale down a node| -|max-total-unready-percentage|45|Maximum percentage of unready nodes in the cluster. After this is exceeded, CA halts operations| -|ok-total-unready-count|3|Number of allowed unready nodes, irrespective of max-total-unready-percentage| -|scale-up-from-zero|true|Should CA scale up when there 0 ready nodes| -|max-node-provision-time|"15m"|Maximum time CA waits for node to be provisioned| -|nodes|-|sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: ::| -|node-group-auto-discovery|-|One or more definition(s) of node group auto-discovery. 
A definition is expressed `:[[=]]`| -|estimator|-|"binpacking"|Type of resource estimator to be used in scale up. Available values: ["binpacking"]| -|expander|"random"|Type of node group expander to be used in scale up. Available values: `["random","most-pods","least-waste","price","priority"]`| -|ignore-daemonsets-utilization|false|Should CA ignore DaemonSet pods when calculating resource utilization for scaling down| -|ignore-mirror-pods-utilization|false|Should CA ignore Mirror pods when calculating resource utilization for scaling down| -|write-status-configmap|true|Should CA write status information to a configmap| -|max-inactivity|"10m"|Maximum time from last recorded autoscaler activity before automatic restart| -|max-failing-time|"15m"|Maximum time from last recorded successful autoscaler run before automatic restart| -|balance-similar-node-groups|false|Detect similar node groups and balance the number of nodes between them| -|node-autoprovisioning-enabled|false|Should CA autoprovision node groups when needed| -|max-autoprovisioned-node-group-count|15|The maximum number of autoprovisioned groups in the cluster| -|unremovable-node-recheck-timeout|"5m"|The timeout before we check again a node that couldn't be removed before| -|expendable-pods-priority-cutoff|-10|Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable| -|regional|false|Cluster is regional| -|new-pod-scale-up-delay|"0s"|Pods less than this old will not be considered for scale-up| -|ignore-taint|-|Specifies a taint to ignore in node templates when considering to scale a node group| -|balancing-ignore-label|-|Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar| -|aws-use-static-instance-list|false|Should CA fetch instance types in runtime or use a static list. 
AWS only| -|profiling|false|Is debug/pprof endpoint enabled| - -#### Deployment - -Based on [cluster-autoscaler-run-on-master.yaml](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml) example, we've created our own `cluster-autoscaler-deployment.yaml` to use preferred [auto-discovery setup](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup), updating tolerations, nodeSelector, image version and command config: - - -```yml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler - name: cluster-autoscaler - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["events", "endpoints"] - verbs: ["create", "patch"] - - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] - - apiGroups: [""] - resources: ["pods/status"] - verbs: ["update"] - - apiGroups: [""] - resources: ["endpoints"] - resourceNames: ["cluster-autoscaler"] - verbs: ["get", "update"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["watch", "list", "get", "update"] - - apiGroups: [""] - resources: - - "pods" - - "services" - - "replicationcontrollers" - - "persistentvolumeclaims" - - "persistentvolumes" - verbs: ["watch", "list", "get"] - - apiGroups: ["extensions"] - resources: ["replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["watch", "list"] - - apiGroups: ["apps"] - resources: ["statefulsets", "replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["watch", "list", "get"] - - 
apiGroups: ["batch", "extensions"] - resources: ["jobs"] - verbs: ["get", "list", "watch", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["create"] - - apiGroups: ["coordination.k8s.io"] - resourceNames: ["cluster-autoscaler"] - resources: ["leases"] - verbs: ["get", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["create","list","watch"] - - apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] - verbs: ["delete", "get", "update", "watch"] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-autoscaler -subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: cluster-autoscaler -subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: kube-system - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - app: cluster-autoscaler -spec: - replicas: 1 - selector: - matchLabels: - app: cluster-autoscaler - template: - metadata: - labels: - app: cluster-autoscaler - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '8085' - spec: - serviceAccountName: cluster-autoscaler - tolerations: - - effect: 
NoSchedule - operator: "Equal" - value: "true" - key: node-role.kubernetes.io/controlplane - nodeSelector: - node-role.kubernetes.io/controlplane: "true" - containers: - - image: eu.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.18.1 - name: cluster-autoscaler - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 300Mi - command: - - ./cluster-autoscaler - - --v=4 - - --stderrthreshold=info - - --cloud-provider=aws - - --skip-nodes-with-local-storage=false - - --expander=least-waste - - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ - volumeMounts: - - name: ssl-certs - mountPath: /etc/ssl/certs/ca-certificates.crt - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - -``` - -Once the manifest file is prepared, deploy it in the Kubernetes cluster (Rancher UI can be used instead): - -```sh -kubectl -n kube-system apply -f cluster-autoscaler-deployment.yaml -``` - -**Note:** Cluster-autoscaler deployment can also be set up using [manual configuration](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#manual-configuration) - -# Testing - -At this point, we should have a cluster-scaler up and running in our Rancher custom cluster. Cluster-scale should manage `K8sWorkerAsg` ASG to scale up and down between 2 and 10 nodes, when one of the following conditions is true: - -* There are pods that failed to run in the cluster due to insufficient resources. In this case, the cluster is scaled up. -* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. In this case, the cluster is scaled down. - -### Generating Load - -We've prepared a `test-deployment.yaml` just to generate load on the Kubernetes cluster and see if cluster-autoscaler is working properly. 
The test deployment is requesting 1000m CPU and 1024Mi memory by three replicas. Adjust the requested resources and/or replica to be sure you exhaust the Kubernetes cluster resources: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: hello-world - name: hello-world -spec: - replicas: 3 - selector: - matchLabels: - app: hello-world - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: rancher/hello-world - imagePullPolicy: Always - name: hello-world - ports: - - containerPort: 80 - protocol: TCP - resources: - limits: - cpu: 1000m - memory: 1024Mi - requests: - cpu: 1000m - memory: 1024Mi -``` - -Once the test deployment is prepared, deploy it in the Kubernetes cluster default namespace (Rancher UI can be used instead): - -``` -kubectl -n default apply -f test-deployment.yaml -``` - -### Checking Scale - -Once the Kubernetes resources got exhausted, cluster-autoscaler should scale up worker nodes where pods failed to be scheduled. It should scale up until up until all pods became scheduled. You should see the new nodes on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. - -Once scale up is checked, let check for scale down. To do it, reduce the replica number on the test deployment until you release enough Kubernetes cluster resources to scale down. You should see nodes disappear on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. 
diff --git a/content/rancher/v2.5/en/cluster-explorer/configmaps/_index.md b/content/rancher/v2.5/en/cluster-explorer/configmaps/_index.md deleted file mode 100644 index f84deb4cceb..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/configmaps/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: ConfigMaps -weight: 3 ---- - - -While most types of Kubernetes secrets store sensitive information, [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) store general configuration information, such as a group of config files. Because ConfigMaps don't store sensitive information, they can be updated automatically, and therefore don't require their containers to be restarted following update (unlike most secret types, which require manual updates and a container restart to take effect). - -ConfigMaps accept key value pairs in common string formats, like config files or JSON blobs. After you upload a config map, any workload can reference it as either an environment variable or a volume mount. - ->**Note:** ConfigMaps can only be applied to namespaces and not projects. - -1. From the **Global** view, select the project containing the namespace that you want to add a ConfigMap to. - -1. From the main menu, select **Resources > Config Maps**. Click **Add Config Map**. - -1. Enter a **Name** for the Config Map. - - >**Note:** Kubernetes classifies ConfigMaps as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your ConfigMaps must have a unique name among the other certificates, registries, and secrets within your workspace. - -1. Select the **Namespace** you want to add Config Map to. You can also add a new namespace on the fly by clicking **Add to a new namespace**. - -1. From **Config Map Values**, click **Add Config Map Value** to add a key value pair to your ConfigMap. Add as many values as you need. 
- -1. Click **Save**. - - >**Note:** Don't use ConfigMaps to store sensitive data [use a secret]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/). - > - >**Tip:** You can add multiple key value pairs to the ConfigMap by copying and pasting. - > - > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} - -**Result:** Your ConfigMap is added to the namespace. You can view it in the Rancher UI from the **Resources > Config Maps** view. - -## What's Next? - -Now that you have a ConfigMap added to a namespace, you can add it to a workload that you deploy from the namespace of origin. You can use the ConfigMap to specify information for you application to consume, such as: - -- Application environment variables. -- Specifying parameters for a Volume mounted to the workload. - -For more information on adding ConfigMaps to a workload, see [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). diff --git a/content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/_index.md b/content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/_index.md deleted file mode 100644 index 85fece31e33..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Set Up Load Balancer and Ingress Controller within Rancher -description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers -weight: 1 ---- - -Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. - -## Load Balancers - -After you launch an application, the app is only available within the cluster. It can't be reached from outside the cluster. - -If you want your applications to be externally accessible, you must add a load balancer or ingress to your cluster. 
Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. - -Rancher supports two types of load balancers: - -- [Layer-4 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) - -For more information, see [load balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). - -### Load Balancer Limitations - -Load Balancers have a couple of limitations you should be aware of: - -- Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiples load balancers can be expensive. - -- If you want to use a load balancer with a Hosted Kubernetes cluster (i.e., clusters hosted in GKE, EKS, or AKS), the load balancer must be running within that cloud provider's infrastructure. Please review the compatibility tables regarding support for load balancers based on how you've provisioned your clusters: - - - - [Support for Layer-4 Load Balancing]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-4-load-balancing) - - - [Support for Layer-7 Load Balancing]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-7-load-balancing) - -## Ingress - -As mentioned in the limitations above, the disadvantages of using a load balancer are: - -- Load Balancers can only handle one IP address per service. -- If you run multiple services in your cluster, you must have a load balancer for each service. -- It can be expensive to have a load balancer for every service. 
- -In contrast, when an ingress is used as the entrypoint into a cluster, the ingress can route traffic to multiple services with greater flexibility. It can map multiple HTTP requests to services without individual IP addresses for each service. - -Therefore, it is useful to have an ingress if you want multiple services to be exposed with the same IP address, the same Layer 7 protocol, or the same privileged node-ports: 80 and 443. - -Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster direct the request to the correct service based on service subdomains or path rules that you've configured. - -Each Kubernetes Ingress resource corresponds roughly to a file in `/etc/nginx/sites-available/` containing a `server{}` configuration block, where requests for specific files and folders are configured. - -Your ingress, which creates a port of entry to your cluster similar to a load balancer, can reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launched clusters are powered by [Nginx](https://www.nginx.com/). - -Ingress can provide other functionality as well, such as SSL termination, name-based virtual hosting, and more. - ->**Using Rancher in a High Availability Configuration?** -> ->Refrain from adding an Ingress to the `local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global entry point for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. - -- For more information on how to set up ingress in Rancher, see [Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). 
-- For complete information about ingress and ingress controllers, see the [Kubernetes Ingress Documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) -- When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry, see [Global DNS]({{}}/rancher/v2.x/en/catalog/globaldns/). diff --git a/content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/ingress/_index.md b/content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/ingress/_index.md deleted file mode 100644 index f10a8928d22..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/ingress/_index.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Adding Ingresses to Your Project -description: Ingresses can be added for workloads to provide load balancing, SSL termination and host/path-based routing. Learn how to add Rancher ingress to your project -weight: 3042 ---- - -Ingress can be added for workloads to provide load balancing, SSL termination and host/path based routing. When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{}}/rancher/v2.x/en/catalog/globaldns/). - -1. From the **Global** view, open the project that you want to add ingress to. - -1. Click **Resources** in the main navigation bar. Click the **Load Balancing** tab. (In versions prior to v2.3.0, just click the **Load Balancing** tab.) Then click **Add Ingress**. - -1. Enter a **Name** for the ingress. - -1. Select an existing **Namespace** from the drop-down list. Alternatively, you can create a new [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) on the fly by clicking **Add to a new namespace**. - -1. Create ingress forwarding **Rules**. - - - **Automatically generate a xip.io hostname** - - If you choose this option, ingress routes requests to hostname to a DNS name that's automatically generated. 
Rancher uses [xip.io](http://xip.io/) to automatically generates the DNS name. This option is best used for testing, _not_ production environments. - - >**Note:** To use this option, you must be able to resolve to `xip.io` addresses. - - 1. Add a **Target Backend**. By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. - - 1. **Optional:** If you want specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. - - Typically, the first rule that you create does not include a path. - - 1. Select a workload or service from the **Target** drop-down list for each target you've added. - - 1. Enter the **Port** number that each target operates on. - - - **Specify a hostname to use** - - If you use this option, ingress routes requests for a hostname to the service or workload that you specify. - - 1. Enter the hostname that your ingress will handle request forwarding for. For example, `www.mysite.com`. - - 1. Add a **Target Backend**. By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. - - 1. **Optional:** If you want specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. - - Typically, the first rule that you create does not include a path. - - 1. Select a workload or service from the **Target** drop-down list for each target you've added. - - 1. Enter the **Port** number that each target operates on. 
- - - - **Use as the default backend** - - Use this option to set an ingress rule for handling requests that don't match any other ingress rules. For example, use this option to route requests that can't be found to a `404` page. - - >**Note:** If you deployed Rancher using RKE, a default backend for 404s and 202s is already configured. - - 1. Add a **Target Backend**. Click either **Service** or **Workload** to add the target. - - 1. Select a service or workload from the **Target** drop-down list. - -1. **Optional:** click **Add Rule** to create additional ingress rules. For example, after you create ingress rules to direct requests for your hostname, you'll likely want to create a default backend to handle 404s. - -1. If any of your ingress rules handle requests for encrypted ports, add a certificate to encrypt/decrypt communications. - - >**Note:** You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information see [Adding SSL Certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/). - - 1. Click **Add Certificate**. - - 1. Select a **Certificate** from the drop-down list. - - 1. Enter the **Host** using encrypted communication. - - 1. To add additional hosts that use the certificate, click **Add Hosts**. - -1. **Optional:** Add [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) and/or [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to provide metadata for your ingress. - - For a list of annotations available for use, see the [Nginx Ingress Controller Documentation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). - -**Result:** Your ingress is added to the project. The ingress begins enforcing your ingress rules. 
diff --git a/content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/load-balancers/_index.md b/content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/load-balancers/_index.md deleted file mode 100644 index 1c634dc2342..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/load-balancers-and-ingress/load-balancers/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: "Layer 4 and Layer 7 Load Balancing" -description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. Learn about the support for each way in different deployments" -weight: 3041 ---- -Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. - -## Layer-4 Load Balancer - -Layer-4 load balancer (or the external load balancer) forwards traffic to Nodeports. Layer-4 load balancer allows you to forward both HTTP and TCP traffic. - -Often, the Layer-4 load balancer is supported by the underlying cloud provider, so when you deploy RKE clusters on bare-metal servers and vSphere clusters, Layer-4 load balancer is not supported. However, a single [globally managed config-map](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) can be used to expose services on NGINX or third-party ingress. - -> **Note:** It is possible to deploy a cluster with a non-cloud load balancer, such as [MetalLB.](https://metallb.universe.tf/) However, that use case is more advanced than the Layer-4 load balancer supported by a cloud provider, and it is not configurable in Rancher or RKE. - -### Support for Layer-4 Load Balancing - -Support for layer-4 load balancer varies based on the underlying cloud provider. 
- -Cluster Deployment | Layer-4 Load Balancer Support -----------------------------------------------|-------------------------------- -Amazon EKS | Supported by AWS cloud provider -Google GKE | Supported by GCE cloud provider -Azure AKS | Supported by Azure cloud provider -RKE on EC2 | Supported by AWS cloud provider -RKE on DigitalOcean | Limited NGINX or third-party Ingress* -RKE on vSphere | Limited NGINX or third-party Ingress* -RKE on Custom Hosts
(e.g. bare-metal servers) | Limited NGINX or third-party Ingress* -Third-party MetalLB | Limited NGINX or third-party Ingress* - -\* Services can be exposed through a single [globally managed config-map.](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) - -## Layer-7 Load Balancer - -Layer-7 load balancer (or the ingress controller) supports host and path-based load balancing and SSL termination. Layer-7 load balancer only forwards HTTP and HTTPS traffic and therefore it listens on ports 80 and 443 only. Cloud providers such as Amazon and Google support layer-7 load balancer. In addition, RKE clusters deploy the Nginx Ingress Controller. - -### Support for Layer-7 Load Balancing - -Support for layer-7 load balancer varies based on the underlying cloud provider. - -Cluster Deployment | Layer-7 Load Balancer Support -----------------------------------------------|-------------------------------- -Amazon EKS | Supported by AWS cloud provider -Google GKE | Supported by GKE cloud provider -Azure AKS | Not Supported -RKE on EC2 | Nginx Ingress Controller -RKE on DigitalOcean | Nginx Ingress Controller -RKE on vSphere | Nginx Ingress Controller -RKE on Custom Hosts
(e.g. bare-metal servers) | Nginx Ingress Controller - -### Host Names in Layer-7 Load Balancer - -Some cloud-managed layer-7 load balancers (such as the ALB ingress controller on AWS) expose DNS addresses for ingress rules. You need to map (via CNAME) your domain name to the DNS address generated by the layer-7 load balancer. - -Other layer-7 load balancers, such as the Google Load Balancer or Nginx Ingress Controller, directly expose one or more IP addresses. Google Load Balancer provides a single routable IP address. Nginx Ingress Controller exposes the external IP of all nodes that run the Nginx Ingress Controller. You can do either of the following: - -1. Configure your own DNS to map (via A records) your domain name to the IP addresses exposed by the Layer-7 load balancer. -2. Ask Rancher to generate an xip.io host name for your ingress rule. Rancher will take one of your exposed IPs, say a.b.c.d, and generate a host name ..a.b.c.d.xip.io. - -The benefit of using xip.io is that you obtain a working entrypoint URL immediately after you create the ingress rule. Setting up your own domain name, on the other hand, requires you to configure DNS servers and wait for DNS to propagate. 
- -## Related Links - -- [Create an External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) - -#### Tutorials - -- [Kubernetes installation with External Load Balancer (HTTPS/Layer 7)]({{}}/rancher/v2.x/en/installation/ha-server-install-external-lb) -- [Kubernetes installation with External Load Balancer (TCP/Layer 4)]({{}}/rancher/v2.x/en/installation/ha-server-install) -- [Docker Installation with External Load Balancer]({{}}/rancher/v2.x/en/installation/single-node-install-external-lb) - - diff --git a/content/rancher/v2.5/en/cluster-explorer/pipelines/_index.md b/content/rancher/v2.5/en/cluster-explorer/pipelines/_index.md deleted file mode 100644 index 3d80f3c57ca..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/pipelines/_index.md +++ /dev/null @@ -1,268 +0,0 @@ ---- -title: Pipelines -weight: 3047 ---- - -Rancher's pipeline provides a simple CI/CD experience. Use it to automatically checkout code, run builds or scripts, publish Docker images or catalog applications, and deploy the updated software to users. - -Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. - -After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: - -- Build your application from code to image. -- Validate your builds. -- Deploy your build images to your cluster. -- Run unit tests. -- Run regression tests. - ->**Notes:** -> ->- Pipelines improved in Rancher v2.1. Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. ->- Still using v2.0.x? See the pipeline documentation for [previous versions]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/docs-for-v2.0.x). 
->- Rancher's pipeline provides a simple CI/CD experience, but it does not offer the full power and flexibility of and is not a replacement of enterprise-grade Jenkins or other CI tools your team uses. - -This section covers the following topics: - -- [Concepts](#concepts) -- [How Pipelines Work](#how-pipelines-work) -- [Roles-based Access Control for Pipelines](#roles-based-access-control-for-pipelines) -- [Setting up Pipelines](#setting-up-pipelines) - - [Configure version control providers](#1-configure-version-control-providers) - - [Configure repositories](#2-configure-repositories) - - [Configure the pipeline](#3-configure-the-pipeline) -- [Pipeline Configuration Reference](#pipeline-configuration-reference) -- [Running your Pipelines](#running-your-pipelines) -- [Triggering a Pipeline](#triggering-a-pipeline) - - [Modifying the Event Triggers for the Repository](#modifying-the-event-triggers-for-the-repository) - -# Concepts - -For an explanation of concepts and terminology used in this section, refer to [this page.]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/concepts) - -# How Pipelines Work - -After enabling the ability to use pipelines in a project, you can configure multiple pipelines in each project. Each pipeline is unique and can be configured independently. - -A pipeline is configured off of a group of files that are checked into source code repositories. Users can configure their pipelines either through the Rancher UI or by adding a `.rancher-pipeline.yml` into the repository. - -Before pipelines can be configured, you will need to configure authentication to your version control provider, e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/) to view some common pipeline deployments. 
- -When you configure a pipeline in one of your projects, a namespace specifically for the pipeline is automatically created. The following components are deployed to it: - - - **Jenkins:** - - The pipeline's build engine. Because project users do not directly interact with Jenkins, it's managed and locked. - - >**Note:** There is no option to use existing Jenkins deployments as the pipeline engine. - - - **Docker Registry:** - - Out-of-the-box, the default target for your build-publish step is an internal Docker Registry. However, you can make configurations to push to a remote registry instead. The internal Docker Registry is only accessible from cluster nodes and cannot be directly accessed by users. Images are not persisted beyond the lifetime of the pipeline and should only be used in pipeline runs. If you need to access your images outside of pipeline runs, please push to an external registry. - - - **Minio:** - - Minio storage is used to store the logs for pipeline executions. - - >**Note:** The managed Jenkins instance works statelessly, so don't worry about its data persistency. The Docker Registry and Minio instances use ephemeral volumes by default, which is fine for most use cases. If you want to make sure pipeline logs can survive node failures, you can configure persistent volumes for them, as described in [data persistency for pipeline components]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/storage). - -# Roles-based Access Control for Pipelines - -If you can access a project, you can enable repositories to start building pipelines. - -Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure version control providers and manage global pipeline execution settings. 
- -Project members can only configure repositories and pipelines. - -# Setting up Pipelines - -To set up pipelines, you will need to do the following: - -1. [Configure version control providers](#1-configure-version-control-providers) -2. [Configure repositories](#2-configure-repositories) -3. [Configure the pipeline](#3-configure-the-pipeline) - -### 1. Configure Version Control Providers - -Before you can start configuring a pipeline for your repository, you must configure and authorize a version control provider. - -| Provider | Available as of | -| --- | --- | -| GitHub | v2.0.0 | -| GitLab | v2.1.0 | -| Bitbucket | v2.2.0 | - -Select your provider's tab below and follow the directions. - -{{% tabs %}} -{{% tab "GitHub" %}} -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. In versions prior to v2.2.0, you can select **Resources > Pipelines**. - -1. Follow the directions displayed to **Setup a Github application**. Rancher redirects you to Github to setup an OAuth App in Github. - -1. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. - -1. If you're using GitHub for enterprise, select **Use a private github enterprise installation**. Enter the host address of your GitHub installation. - -1. Click **Authenticate**. - -{{% /tab %}} -{{% tab "GitLab" %}} - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. In versions prior to v2.2.0, you can select **Resources > Pipelines**. - -1. Follow the directions displayed to **Setup a GitLab application**. Rancher redirects you to GitLab. - -1. From GitLab, copy the **Application ID** and **Secret**. Paste them into Rancher. - -1. If you're using GitLab for enterprise setup, select **Use a private gitlab enterprise installation**. Enter the host address of your GitLab installation. - -1. 
Click **Authenticate**. - ->**Note:** -> 1. Pipeline uses Gitlab [v4 API](https://docs.gitlab.com/ee/api/v3_to_v4.html) and the supported Gitlab version is 9.0+. -> 2. If you use GitLab 10.7+ and your Rancher setup is in a local network, enable the **Allow requests to the local network from hooks and services** option in GitLab admin settings. -{{% /tab %}} -{{% tab "Bitbucket Cloud" %}} - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. - -1. Choose the **Use public Bitbucket Cloud** option. - -1. Follow the directions displayed to **Setup a Bitbucket Cloud application**. Rancher redirects you to Bitbucket to setup an OAuth consumer in Bitbucket. - -1. From Bitbucket, copy the consumer **Key** and **Secret**. Paste them into Rancher. - -1. Click **Authenticate**. - -{{% /tab %}} -{{% tab "Bitbucket Server" %}} - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. - -1. Choose the **Use private Bitbucket Server setup** option. - -1. Follow the directions displayed to **Setup a Bitbucket Server application**. - -1. Enter the host address of your Bitbucket server installation. - -1. Click **Authenticate**. - ->**Note:** -> Bitbucket server needs to do SSL verification when sending webhooks to Rancher. Please ensure that Rancher server's certificate is trusted by the Bitbucket server. There are two options: -> -> 1. Setup Rancher server with a certificate from a trusted CA. -> 1. If you're using self-signed certificates, import Rancher server's certificate to the Bitbucket server. For instructions, see the Bitbucket server documentation for [configuring self-signed certificates](https://confluence.atlassian.com/bitbucketserver/if-you-use-self-signed-certificates-938028692.html). 
-> -{{% /tab %}} -{{% /tabs %}} - -**Result:** After the version control provider is authenticated, you will be automatically re-directed to start configuring which repositories you want to start using with a pipeline. - -### 2. Configure Repositories - -After the version control provider is authorized, you are automatically re-directed to start configuring which repositories that you want to start using pipelines with. Even if someone else has set up the version control provider, you will see their repositories and can build a pipeline. - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** - -1. Click on **Configure Repositories**. - -1. A list of repositories is displayed. If you are configuring repositories the first time, click on **Authorize & Fetch Your Own Repositories** to fetch your repository list. - -1. For each repository that you want to set up a pipeline, click on **Enable**. - -1. When you're done enabling all your repositories, click on **Done**. - -**Results:** You have a list of repositories that you can start configuring pipelines for. - -### 3. Configure the Pipeline - -Now that repositories are added to your project, you can start configuring the pipeline by adding automated stages and steps. For your convenience, there are multiple built-in step types for dedicated tasks. - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** - -1. Find the repository that you want to set up a pipeline for. - -1. Configure the pipeline through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Pipeline configuration is split into stages and steps. 
Stages must fully complete before moving onto the next stage, but steps in a stage run concurrently. For each stage, you can add different step types. Note: As you build out each step, there are different advanced options based on the step type. Advanced options include trigger rules, environment variables, and secrets. For more information on configuring the pipeline through the UI or the YAML file, refer to the [pipeline configuration reference.]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/config) - - * If you are going to use the UI, select the vertical **⋮ > Edit Config** to configure the pipeline using the UI. After the pipeline is configured, you must view the YAML file and push it to the repository. - * If you are going to use the YAML file, select the vertical **⋮ > View/Edit YAML** to configure the pipeline. If you choose to use a YAML file, you need to push it to the repository after any changes in order for it to be updated in the repository. When editing the pipeline configuration, it takes a few moments for Rancher to check for an existing pipeline configuration. - -1. Select which `branch` to use from the list of branches. - -1. Optional: Set up notifications. - -1. Set up the trigger rules for the pipeline. - -1. Enter a **Timeout** for the pipeline. - -1. When all the stages and steps are configured, click **Done**. - -**Results:** Your pipeline is now configured and ready to be run. - - -# Pipeline Configuration Reference - -Refer to [this page]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/config) for details on how to configure a pipeline to: - -- Run a script -- Build and publish images -- Publish catalog templates -- Deploy YAML -- Deploy a catalog app - -The configuration reference also covers how to configure: - -- Notifications -- Timeouts -- The rules that trigger a pipeline -- Environment variables -- Secrets - - -# Running your Pipelines - -Run your pipeline for the first time. 
From the project view in Rancher, go to **Resources > Pipelines.** (In versions prior to v2.3.0, go to the **Pipelines** tab.) Find your pipeline and select the vertical **⋮ > Run**. - -During this initial run, your pipeline is tested, and the following pipeline components are deployed to your project as workloads in a new namespace dedicated to the pipeline: - -- `docker-registry` -- `jenkins` -- `minio` - -This process takes several minutes. When it completes, you can view each pipeline component from the project **Workloads** tab. - -# Triggering a Pipeline - -When a repository is enabled, a webhook is automatically set in the version control provider. By default, the pipeline is triggered by a **push** event to a repository, but you can modify the event(s) that trigger running the pipeline. - -Available Events: - -* **Push**: Whenever a commit is pushed to the branch in the repository, the pipeline is triggered. -* **Pull Request**: Whenever a pull request is made to the repository, the pipeline is triggered. -* **Tag**: When a tag is created in the repository, the pipeline is triggered. - -> **Note:** This option doesn't exist for Rancher's [example repositories]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/). - -### Modifying the Event Triggers for the Repository - -1. From the **Global** view, navigate to the project that you want to modify the event trigger for the pipeline. - -1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** - -1. Find the repository that you want to modify the event triggers. Select the vertical **⋮ > Setting**. - -1. Select which event triggers (**Push**, **Pull Request** or **Tag**) you want for the repository. - -1. Click **Save**. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/pipelines/concepts/_index.md b/content/rancher/v2.5/en/cluster-explorer/pipelines/concepts/_index.md deleted file mode 100644 index db8e3a24a58..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/pipelines/concepts/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Concepts -weight: 1 ---- - -The purpose of this page is to explain common concepts and terminology related to pipelines. - -- **Pipeline:** - - A _pipeline_ is a software delivery process that is broken into different stages and steps. Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. A pipeline is based on a specific repository. It defines the process to build, test, and deploy your code. Rancher uses the [pipeline as code](https://jenkins.io/doc/book/pipeline-as-code/) model. Pipeline configuration is represented as a pipeline file in the source code repository, using the file name `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. - -- **Stages:** - - A pipeline stage consists of multiple steps. Stages are executed in the order defined in the pipeline file. The steps in a stage are executed concurrently. A stage starts when all steps in the former stage finish without failure. - -- **Steps:** - - A pipeline step is executed inside a specified stage. A step fails if it exits with a code other than `0`. If a step exits with this failure code, the entire pipeline fails and terminates. - -- **Workspace:** - - The workspace is the working directory shared by all pipeline steps. In the beginning of a pipeline, source code is checked out to the workspace. The command for every step bootstraps in the workspace. During a pipeline execution, the artifacts from a previous step will be available in future steps. 
The working directory is an ephemeral volume and will be cleaned out with the executor pod when a pipeline execution is finished. - -Typically, pipeline stages include: - -- **Build:** - - Each time code is checked into your repository, the pipeline automatically clones the repo and builds a new iteration of your software. Throughout this process, the software is typically reviewed by automated tests. - -- **Publish:** - - After the build is completed, either a Docker image is built and published to a Docker registry or a catalog template is published. - -- **Deploy:** - - After the artifacts are published, you would release your application so users could start using the updated product. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/pipelines/config/_index.md b/content/rancher/v2.5/en/cluster-explorer/pipelines/config/_index.md deleted file mode 100644 index 3b9547fab67..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/pipelines/config/_index.md +++ /dev/null @@ -1,645 +0,0 @@ ---- -title: Pipeline Configuration Reference -weight: 1 ---- - -In this section, you'll learn how to configure pipelines. 
- -- [Step Types](#step-types) -- [Step Type: Run Script](#step-type-run-script) -- [Step Type: Build and Publish Images](#step-type-build-and-publish-images) -- [Step Type: Publish Catalog Template](#step-type-publish-catalog-template) -- [Step Type: Deploy YAML](#step-type-deploy-yaml) -- [Step Type: Deploy Catalog App](#step-type-deploy-catalog-app) -- [Notifications](#notifications) -- [Timeouts](#timeouts) -- [Triggers and Trigger Rules](#triggers-and-trigger-rules) -- [Environment Variables](#environment-variables) -- [Secrets](#secrets) -- [Pipeline Variable Substitution Reference](#pipeline-variable-substitution-reference) -- [Global Pipeline Execution Settings](#global-pipeline-execution-settings) - - [Executor Quota](#executor-quota) - - [Resource Quota for Executors](#resource-quota-for-executors) - - [Custom CA](#custom-ca) -- [Persistent Data for Pipeline Components](#persistent-data-for-pipeline-components) -- [Example rancher-pipeline.yml](#example-rancher-pipeline-yml) - -# Step Types - -Within each stage, you can add as many steps as you'd like. When there are multiple steps in one stage, they run concurrently. - -Step types include: - -- [Run Script](#step-type-run-script) -- [Build and Publish Images](#step-type-build-and-publish-images) -- [Publish Catalog Template](#step-type-publish-catalog-template) -- [Deploy YAML](#step-type-deploy-yaml) -- [Deploy Catalog App](#step-type-deploy-catalog-app) - - - -### Configuring Steps By UI - -If you haven't added any stages, click **Configure pipeline for this branch** to configure the pipeline through the UI. - -1. Add stages to your pipeline execution by clicking **Add Stage**. - - 1. Enter a **Name** for each stage of your pipeline. - 1. For each stage, you can configure [trigger rules](#triggers-and-trigger-rules) by clicking on **Show Advanced Options**. Note: this can always be updated at a later time. - -1. After you've created a stage, start [adding steps](#step-types) by clicking **Add a Step**. 
You can add multiple steps to each stage. - -### Configuring Steps by YAML - -For each stage, you can add multiple steps. Read more about each [step type](#step-types) and the advanced options to get all the details on how to configure the YAML. This is only a small example of how to have multiple stages with a singular step in each stage. - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: date -R - - name: Publish my image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: rancher/rancher:v2.0.0 - # Optionally push to remote registry - pushRemote: true - registry: reg.example.com -``` -# Step Type: Run Script - -The **Run Script** step executes arbitrary commands in the workspace inside a specified container. You can use it to build, test and do more, given whatever utilities the base image provides. For your convenience, you can use variables to refer to metadata of a pipeline execution. Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. - -### Configuring Script by UI - -1. From the **Step Type** drop-down, choose **Run Script** and fill in the form. - -1. Click **Add**. - -### Configuring Script by YAML -```yaml -# example -stages: -- name: Build something - steps: - - runScriptConfig: - image: golang - shellScript: go build -``` -# Step Type: Build and Publish Images - -The **Build and Publish Image** step builds and publishes a Docker image. This process requires a Dockerfile in your source code's repository to complete successfully. - -The option to publish an image to an insecure registry is not exposed in the UI, but you can specify an environment variable in the YAML that allows you to publish an image insecurely. 
- -### Configuring Building and Publishing Images by UI -1. From the **Step Type** drop-down, choose **Build and Publish**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Dockerfile Path | The relative path to the Dockerfile in the source code repo. By default, this path is `./Dockerfile`, which assumes the Dockerfile is in the root directory. You can set it to other paths in different use cases (`./path/to/myDockerfile` for example). | - Image Name | The image name in `name:tag` format. The registry address is not required. For example, to build `example.com/repo/my-image:dev`, enter `repo/my-image:dev`. | - Push image to remote repository | An option to set the registry that publishes the image that's built. To use this option, enable it and choose a registry from the drop-down. If this option is disabled, the image is pushed to the internal registry. | - Build Context

(**Show advanced options**)| By default, the root directory of the source code (`.`). For more details, see the Docker [build command documentation](https://docs.docker.com/engine/reference/commandline/build/). - -### Configuring Building and Publishing Images by YAML - -You can use specific arguments for Docker daemon and the build. They are not exposed in the UI, but they are available in pipeline YAML format, as indicated in the example below. Available environment variables include: - -Variable Name | Description -------------------------|------------------------------------------------------------ -PLUGIN_DRY_RUN | Disable docker push -PLUGIN_DEBUG | Docker daemon executes in debug mode -PLUGIN_MIRROR | Docker daemon registry mirror -PLUGIN_INSECURE | Docker daemon allows insecure registries -PLUGIN_BUILD_ARGS | Docker build args, a comma separated list - -
- -```yaml -# This example shows an environment variable being used -# in the Publish Image step. This variable allows you to -# publish an image to an insecure registry: - -stages: -- name: Publish Image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: repo/app:v1 - pushRemote: true - registry: example.com - env: - PLUGIN_INSECURE: "true" -``` - -# Step Type: Publish Catalog Template - -The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a [git hosted chart repository]({{}}/rancher/v2.x/en/catalog/custom/). It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) is supported for any file in the chart folder. - -### Configuring Publishing a Catalog Template by UI - -1. From the **Step Type** drop-down, choose **Publish Catalog Template**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Chart Folder | The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. | - Catalog Template Name | The name of the template. For example, wordpress. | - Catalog Template Version | The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. | - Protocol | You can choose to publish via HTTP(S) or SSH protocol. | - Secret | The secret that stores your Git credentials. You need to create a secret in dedicated pipeline namespace in the project before adding this step. If you use HTTP(S) protocol, store Git username and password in `USERNAME` and `PASSWORD` key of the secret. 
If you use SSH protocol, store Git deploy key in `DEPLOY_KEY` key of the secret. After the secret is created, select it in this option. | - Git URL | The Git URL of the chart repository that the template will be published to. | - Git Branch | The Git branch of the chart repository that the template will be published to. | - Author Name | The author name used in the commit message. | - Author Email | The author email used in the commit message. | - - -### Configuring Publishing a Catalog Template by YAML - -You can add **Publish Catalog Template** steps directly in the `.rancher-pipeline.yml` file. - -Under the `steps` section, add a step with `publishCatalogConfig`. You will provide the following information: - -* Path: The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. -* CatalogTemplate: The name of the template. -* Version: The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. -* GitUrl: The git URL of the chart repository that the template will be published to. -* GitBranch: The git branch of the chart repository that the template will be published to. -* GitAuthor: The author name used in the commit message. -* GitEmail: The author email used in the commit message. -* Credentials: You should provide Git credentials by referencing secrets in dedicated pipeline namespace. If you publish via SSH protocol, inject your deploy key to the `DEPLOY_KEY` environment variable. If you publish via HTTP(S) protocol, inject your username and password to `USERNAME` and `PASSWORD` environment variables. 
- -```yaml -# example -stages: -- name: Publish Wordpress Template - steps: - - publishCatalogConfig: - path: ./charts/wordpress/latest - catalogTemplate: wordpress - version: ${CICD_GIT_TAG} - gitUrl: git@github.com:myrepo/charts.git - gitBranch: master - gitAuthor: example-user - gitEmail: user@example.com - envFrom: - - sourceName: publish-keys - sourceKey: DEPLOY_KEY -``` - -# Step Type: Deploy YAML - -This step deploys arbitrary Kubernetes resources to the project. This deployment requires a Kubernetes manifest file to be present in the source code repository. Pipeline variable substitution is supported in the manifest file. You can view an example file at [GitHub](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml). Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. - -### Configure Deploying YAML by UI - -1. From the **Step Type** drop-down, choose **Deploy YAML** and fill in the form. - -1. Enter the **YAML Path**, which is the path to the manifest file in the source code. - -1. Click **Add**. - -### Configure Deploying YAML by YAML - -```yaml -# example -stages: -- name: Deploy - steps: - - applyYamlConfig: - path: ./deployment.yaml -``` - -# Step Type :Deploy Catalog App - -The **Deploy Catalog App** step deploys a catalog app in the project. It will install a new app if it is not present, or upgrade an existing one. - -### Configure Deploying Catalog App by UI - -1. From the **Step Type** drop-down, choose **Deploy Catalog App**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Catalog | The catalog from which the app template will be used. | - Template Name | The name of the app template. For example, wordpress. | - Template Version | The version of the app template you want to deploy. 
| - Namespace | The target namespace where you want to deploy the app. | - App Name | The name of the app you want to deploy. | - Answers | Key-value pairs of answers used to deploy the app. | - - -### Configure Deploying Catalog App by YAML - -You can add **Deploy Catalog App** steps directly in the `.rancher-pipeline.yml` file. - -Under the `steps` section, add a step with `applyAppConfig`. You will provide the following information: - -* CatalogTemplate: The ID of the template. This can be found by clicking `Launch app` and selecting `View details` for the app. It is the last part of the URL. -* Version: The version of the template you want to deploy. -* Answers: Key-value pairs of answers used to deploy the app. -* Name: The name of the app you want to deploy. -* TargetNamespace: The target namespace where you want to deploy the app. - -```yaml -# example -stages: -- name: Deploy App - steps: - - applyAppConfig: - catalogTemplate: cattle-global-data:library-mysql - version: 0.3.8 - answers: - persistence.enabled: "false" - name: testmysql - targetNamespace: test -``` - -# Timeouts - -By default, each pipeline execution has a timeout of 60 minutes. If the pipeline execution cannot complete within its timeout period, the pipeline is aborted. - -### Configuring Timeouts by UI - -Enter a new value in the **Timeout** field. - -### Configuring Timeouts by YAML - -In the `timeout` section, enter the timeout value in minutes. - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls -# timeout in minutes -timeout: 30 -``` - -# Notifications - -You can enable notifications to any [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) based on the build status of a pipeline. Before enabling notifications, Rancher recommends [setting up notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers) so it will be easy to add recipients immediately. 
- -### Configuring Notifications by UI - -1. Within the **Notification** section, turn on notifications by clicking **Enable**. - -1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. - -1. If you don't have any existing [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers), Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. - - > **Note:** Notifiers are configured at a cluster level and require a different level of permissions. - -1. For each recipient, select which notifier type from the dropdown. Based on the type of notifier, you can use the default recipient or override the recipient with a different one. For example, if you have a notifier for _Slack_, you can update which channel to send the notification to. You can add additional notifiers by clicking **Add Recipient**. - -### Configuring Notifications by YAML - -In the `notification` section, you will provide the following information: - -* **Recipients:** This will be the list of notifiers/recipients that will receive the notification. - * **Notifier:** The ID of the notifier. This can be found by finding the notifier and selecting **View in API** to get the ID. - * **Recipient:** Depending on the type of the notifier, the "default recipient" can be used or you can override this with a different recipient. For example, when configuring a slack notifier, you select a channel as your default recipient, but if you wanted to send notifications to a different channel, you can select a different recipient. 
-* **Condition:** Select the conditions under which you want the notification to be sent.
-* **Message (Optional):** If you want to change the default notification message, you can edit this in the yaml. Note: This option is not available in the UI.
-
-```yaml
-# Example
-stages:
-  - name: Build something
-    steps:
-    - runScriptConfig:
-        image: busybox
-        shellScript: ls
-notification:
-  recipients:
-  - # Recipient
-    recipient: "#mychannel"
-    # ID of Notifier
-    notifier: "c-wdcsr:n-c9pg7"
-  - recipient: "test@example.com"
-    notifier: "c-wdcsr:n-lkrhd"
-  # Select which statuses you want the notification to be sent
-  condition: ["Failed", "Success", "Changed"]
-  # Ability to override the default message (Optional)
-  message: "my-message"
-```
-
-# Triggers and Trigger Rules
-
-After you configure a pipeline, you can trigger it using different methods:
-
-- **Manually:**
-
-  After you configure a pipeline, you can trigger a build using the latest CI definition from Rancher UI. When a pipeline execution is triggered, Rancher dynamically provisions a Kubernetes pod to run your CI tasks and then removes it upon completion.
-
-- **Automatically:**
-
-  When you enable a repository for a pipeline, webhooks are automatically added to the version control system. When project users interact with the repo by pushing code, opening pull requests, or creating a tag, the version control system sends a webhook to Rancher Server, triggering a pipeline execution.
-
-  To use this automation, webhook management permission is required for the repository. Therefore, when users authenticate and fetch their repositories, only those on which they have webhook management permission will be shown.
-
-Trigger rules can be created to have fine-grained control of pipeline executions in your pipeline configuration. Trigger rules come in two types:
-
-- **Run this when:** This type of rule starts the pipeline, stage, or step when a trigger explicitly occurs.
- -- **Do Not Run this when:** This type of rule skips the pipeline, stage, or step when a trigger explicitly occurs. - -If all conditions evaluate to `true`, then the pipeline/stage/step is executed. Otherwise it is skipped. When a pipeline is skipped, none of the pipeline is executed. When a stage/step is skipped, it is considered successful and follow-up stages/steps continue to run. - -Wildcard character (`*`) expansion is supported in `branch` conditions. - -This section covers the following topics: - -- [Configuring pipeline triggers](#configuring-pipeline-triggers) -- [Configuring stage triggers](#configuring-stage-triggers) -- [Configuring step triggers](#configuring-step-triggers) -- [Configuring triggers by YAML](#configuring-triggers-by-yaml) - -### Configuring Pipeline Triggers - -1. From the **Global** view, navigate to the project that you want to configure a pipeline trigger rule. - -1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. Click on **Show Advanced Options**. - -1. In the **Trigger Rules** section, configure rules to run or skip the pipeline. - - 1. Click **Add Rule**. In the **Value** field, enter the name of the branch that triggers the pipeline. - - 1. **Optional:** Add more branches that trigger a build. - -1. Click **Done.** - -### Configuring Stage Triggers - -1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. - -1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. Find the **stage** that you want to manage trigger rules, click the **Edit** icon for that stage. - -1. Click **Show advanced options**. - -1. 
In the **Trigger Rules** section, configure rules to run or skip the stage. - - 1. Click **Add Rule**. - - 1. Choose the **Type** that triggers the stage and enter a value. - - | Type | Value | - | ------ | -------------------------------------------------------------------- | - | Branch | The name of the branch that triggers the stage. | - | Event | The type of event that triggers the stage. Values are: `Push`, `Pull Request`, `Tag` | - -1. Click **Save**. - -### Configuring Step Triggers - -1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. - -1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. Find the **step** that you want to manage trigger rules, click the **Edit** icon for that step. - -1. Click **Show advanced options**. - -1. In the **Trigger Rules** section, configure rules to run or skip the step. - - 1. Click **Add Rule**. - - 1. Choose the **Type** that triggers the step and enter a value. - - | Type | Value | - | ------ | -------------------------------------------------------------------- | - | Branch | The name of the branch that triggers the step. | - | Event | The type of event that triggers the step. Values are: `Push`, `Pull Request`, `Tag` | - -1. Click **Save**. 
- - -### Configuring Triggers by YAML - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: date -R - # Conditions for steps - when: - branch: [ master, dev ] - event: push -# branch conditions for the pipeline -branch: - include: [ master, feature/*] - exclude: [ dev ] -``` - -# Environment Variables - -When configuring a pipeline, certain [step types](#step-types) allow you to use environment variables to configure the step's script. - -### Configuring Environment Variables by UI - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** - -1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. - -1. Within one of the stages, find the **step** that you want to add an environment variable for, click the **Edit** icon. - -1. Click **Show advanced options**. - -1. Click **Add Variable**, and then enter a key and value in the fields that appear. Add more variables if needed. - -1. Add your environment variable(s) into either the script or file. - -1. Click **Save**. - -### Configuring Environment Variables by YAML - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${FIRST_KEY} && echo ${SECOND_KEY} - env: - FIRST_KEY: VALUE - SECOND_KEY: VALUE2 -``` - -# Secrets - -If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/). - -### Prerequisite -Create a secret in the same project as your pipeline, or explicitly in the namespace where pipeline build pods run. -
- ->**Note:** Secret injection is disabled on [pull request events](#triggers-and-trigger-rules). - -### Configuring Secrets by UI - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** - -1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. - -1. Within one of the stages, find the **step** that you want to use a secret for, click the **Edit** icon. - -1. Click **Show advanced options**. - -1. Click **Add From Secret**. Select the secret file that you want to use. Then choose a key. Optionally, you can enter an alias for the key. - -1. Click **Save**. - -### Configuring Secrets by YAML - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${ALIAS_ENV} - # environment variables from project secrets - envFrom: - - sourceName: my-secret - sourceKey: secret-key - targetKey: ALIAS_ENV -``` - -# Pipeline Variable Substitution Reference - -For your convenience, the following variables are available for your pipeline configuration scripts. During pipeline executions, these variables are replaced by metadata. You can reference them in the form of `${VAR_NAME}`. - -Variable Name | Description -------------------------|------------------------------------------------------------ -`CICD_GIT_REPO_NAME` | Repository name (Github organization omitted). -`CICD_GIT_URL` | URL of the Git repository. -`CICD_GIT_COMMIT` | Git commit ID being executed. -`CICD_GIT_BRANCH` | Git branch of this event. -`CICD_GIT_REF` | Git reference specification of this event. -`CICD_GIT_TAG` | Git tag name, set on tag event. -`CICD_EVENT` | Event that triggered the build (`push`, `pull_request` or `tag`). -`CICD_PIPELINE_ID` | Rancher ID for the pipeline. -`CICD_EXECUTION_SEQUENCE` | Build number of the pipeline. 
-`CICD_EXECUTION_ID` | Combination of `{CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE}`. -`CICD_REGISTRY` | Address for the Docker registry for the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. -`CICD_IMAGE` | Name of the image built from the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. It does not contain the image tag.

[Example](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml) - -# Global Pipeline Execution Settings - -After configuring a version control provider, there are several options that can be configured globally on how pipelines are executed in Rancher. These settings can be edited by selecting **Tools > Pipelines** in the navigation bar. In versions prior to v2.2.0, you can select **Resources > Pipelines**. - -- [Executor Quota](#executor-quota) -- [Resource Quota for Executors](#resource-quota-for-executors) -- [Custom CA](#custom-ca) - -### Executor Quota - -Select the maximum number of pipeline executors. The _executor quota_ decides how many builds can run simultaneously in the project. If the number of triggered builds exceeds the quota, subsequent builds will queue until a vacancy opens. By default, the quota is `2`. A value of `0` or less removes the quota limit. - -### Resource Quota for Executors - -Configure compute resources for Jenkins agent containers. When a pipeline execution is triggered, a build pod is dynamically provisioned to run your CI tasks. Under the hood, A build pod consists of one Jenkins agent container and one container for each pipeline step. You can [manage compute resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for every containers in the pod. - -Edit the **Memory Reservation**, **Memory Limit**, **CPU Reservation** or **CPU Limit**, then click **Update Limit and Reservation**. - -To configure compute resources for pipeline-step containers: - -You can configure compute resources for pipeline-step containers in the `.rancher-pipeline.yml` file. - -In a [step type]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#step-types), you will provide the following information: - -* **CPU Reservation (`CpuRequest`)**: CPU request for the container of a pipeline step. -* **CPU Limit (`CpuLimit`)**: CPU limit for the container of a pipeline step. 
-* **Memory Reservation (`MemoryRequest`)**: Memory request for the container of a pipeline step.
-* **Memory Limit (`MemoryLimit`)**: Memory limit for the container of a pipeline step.
-
-```yaml
-# example
-stages:
-  - name: Build something
-    steps:
-    - runScriptConfig:
-        image: busybox
-        shellScript: ls
-        cpuRequest: 100m
-        cpuLimit: 1
-        memoryRequest: 100Mi
-        memoryLimit: 1Gi
-    - publishImageConfig:
-        dockerfilePath: ./Dockerfile
-        buildContext: .
-        tag: repo/app:v1
-        cpuRequest: 100m
-        cpuLimit: 1
-        memoryRequest: 100Mi
-        memoryLimit: 1Gi
-```
-
->**Note:** Rancher sets default compute resources for pipeline steps except for `Build and Publish Images` and `Run Script` steps. You can override the default value by specifying compute resources in the same way.
-
-### Custom CA
-
-If you want to use a version control provider with a certificate from a custom/internal CA root, the CA root certificates need to be added as part of the version control provider configuration in order for the pipeline build pods to succeed.
-
-1. Click **Edit cacerts**.
-
-1. Paste in the CA root certificates and click **Save cacerts**.
-
-**Result:** Pipelines can be used and new pods will be able to work with the self-signed certificate.
-
-# Persistent Data for Pipeline Components
-
-The internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes.
- -For details on setting up persistent storage for pipelines, refer to [this page.]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/storage) - -# Example rancher-pipeline.yml - -An example pipeline configuration file is on [this page.]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example) \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/pipelines/docs-for-v2.0.x/_index.md b/content/rancher/v2.5/en/cluster-explorer/pipelines/docs-for-v2.0.x/_index.md deleted file mode 100644 index cd6f3c0da1d..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/pipelines/docs-for-v2.0.x/_index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: v2.0.x Pipeline Documentation -weight: 9000 ---- - ->**Note:** This section describes the pipeline feature as implemented in Rancher v2.0.x. If you are using Rancher v2.1 or later, where pipelines have been significantly improved, please refer to the new documentation for [v2.1 or later]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/). - - - -Pipelines help you automate the software delivery process. You can integrate Rancher with GitHub to create a pipeline. - -You can set up your pipeline to run a series of stages and steps to test your code and deploy it. - -
-
Pipelines
-
Contain a series of stages and steps. Out-of-the-box, the pipelines feature supports fan out and in capabilities.
-
Stages
-
Executed sequentially. The next stage will not execute until all of the steps within the stage execute.
-
Steps
-
Are executed in parallel within a stage.
-
- -## Enabling CI Pipelines - -1. Select cluster from drop down. - -2. Under tools menu select pipelines. - -3. Follow instructions for setting up github auth on page. - - -## Creating CI Pipelines - -1. Go to the project you want this pipeline to run in. - -2. Click **Resources > Pipelines.** In versions prior to v2.3.0,click **Workloads > Pipelines.** - -4. Click Add pipeline button. - -5. Enter in your repository name (Autocomplete should help zero in on it quickly). - -6. Select Branch options. - - - Only the branch {BRANCH NAME}: Only events triggered by changes to this branch will be built. - - - Everything but {BRANCH NAME}: Build any branch that triggered an event EXCEPT events from this branch. - - - All branches: Regardless of the branch that triggered the event always build. - - >**Note:** If you want one path for master, but another for PRs or development/test/feature branches, create two separate pipelines. - -7. Select the build trigger events. By default, builds will only happen by manually clicking build now in Rancher UI. - - - Automatically build this pipeline whenever there is a git commit. (This respects the branch selection above) - - - Automatically build this pipeline whenever there is a new PR. - - - Automatically build the pipeline. (Allows you to configure scheduled builds similar to Cron) - -8. Click Add button. - - By default, Rancher provides a three stage pipeline for you. It consists of a build stage where you would compile, unit test, and scan code. The publish stage has a single step to publish a docker image. - - -8. Add a name to the pipeline in order to complete adding a pipeline. - -9. Click on the ‘run a script’ box under the ‘Build’ stage. - - Here you can set the image, or select from pre-packaged envs. - -10. Configure a shell script to run inside the container when building. - -11. Click Save to persist the changes. - -12. Click the “publish an image’ box under the “Publish” stage. - -13. Set the location of the Dockerfile. 
By default it looks in the root of the workspace. Instead, set the build context for building the image relative to the root of the workspace. - -14. Set the image information. - - The registry is the remote registry URL. It is defaulted to Docker hub. - Repository is the `/` in the repository. - -15. Select the Tag. You can hard code a tag like ‘latest’ or select from a list of available variables. - -16. If this is the first time using this registry, you can add the username/password for pushing the image. You must click save for the registry credentials AND also save for the modal. - - - - -## Creating a New Stage - -1. To add a new stage the user must click the ‘add a new stage’ link in either create or edit mode of the pipeline view. - -2. Provide a name for the stage. - -3. Click save. - - -## Creating a New Step - -1. Go to create / edit mode of the pipeline. - -2. Click “Add Step” button in the stage that you would like to add a step in. - -3. Fill out the form as detailed above - - -## Environment Variables - -For your convenience the following environment variables are available in your build steps: - -Variable Name | Description -------------------------|------------------------------------------------------------ -CICD_GIT_REPO_NAME | Repository Name (Stripped of Github Organization) -CICD_PIPELINE_NAME | Name of the pipeline -CICD_GIT_BRANCH | Git branch of this event -CICD_TRIGGER_TYPE | Event that triggered the build -CICD_PIPELINE_ID | Rancher ID for the pipeline -CICD_GIT_URL | URL of the Git repository -CICD_EXECUTION_SEQUENCE | Build number of the pipeline -CICD_EXECUTION_ID | Combination of {CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE} -CICD_GIT_COMMIT | Git commit ID being executed. 
diff --git a/content/rancher/v2.5/en/cluster-explorer/pipelines/example-repos/_index.md b/content/rancher/v2.5/en/cluster-explorer/pipelines/example-repos/_index.md deleted file mode 100644 index 2e24fbfeeb1..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/pipelines/example-repos/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Example Repositories -weight: 500 ---- - -Rancher ships with several example repositories that you can use to familiarize yourself with pipelines. We recommend configuring and testing the example repository that most resembles your environment before using pipelines with your own repositories in a production environment. Use this example repository as a sandbox for repo configuration, build demonstration, etc. Rancher includes example repositories for: - -- Go -- Maven -- php - -> **Note:** The example repositories are only available if you have not [configured a version control provider]({{}}/rancher/v2.x/en/project-admin/pipelines). - -To start using these example repositories, - -1. [Enable the example repositories](#1-enable-the-example-repositories) -2. [View the example pipeline](#2-view-the-example-pipeline) -3. [Run the example pipeline](#3-run-the-example-pipeline) - -### 1. Enable the Example Repositories - -By default, the example pipeline repositories are disabled. Enable one (or more) to test out the pipeline feature and see how it works. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** - -1. Click **Configure Repositories**. - - **Step Result:** A list of example repositories displays. - - >**Note:** Example repositories only display if you haven't fetched your own repos. - -1. Click **Enable** for one of the example repos (e.g., `https://github.com/rancher/pipeline-example-go.git`). Then click **Done**. 
- -**Results:** - -- The example repository is enabled to work with a pipeline is available in the **Pipeline** tab. - -- The following workloads are deployed to a new namespace: - - - `docker-registry` - - `jenkins` - - `minio` - -### 2. View the Example Pipeline - -After enabling an example repository, review the pipeline to see how it is set up. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** - -1. Find the example repository, select the vertical **⋮**. There are two ways to view the pipeline: - * **Rancher UI**: Click on **Edit Config** to view the stages and steps of the pipeline. - * **YAML**: Click on View/Edit YAML to view the `./rancher-pipeline.yml` file. - -### 3. Run the Example Pipeline - -After enabling an example repository, run the pipeline to see how it works. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** - -1. Find the example repository, select the vertical **⋮ > Run**. - - >**Note:** When you run a pipeline the first time, it takes a few minutes to pull relevant images and provision necessary pipeline components. - -**Result:** The pipeline runs. You can see the results in the logs. - -### What's Next? - -For detailed information about setting up your own pipeline for your repository, [configure a version control provider]({{}}/rancher/v2.x/en/project-admin/pipelines), [enable a repository](#configure-repositories) and finally [configure your pipeline]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration). 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/pipelines/example/_index.md b/content/rancher/v2.5/en/cluster-explorer/pipelines/example/_index.md deleted file mode 100644 index 28cad6e2b8f..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/pipelines/example/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Example YAML File -weight: 501 ---- - -Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. - -In the [pipeline configuration reference]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/config), we provide examples of how to configure each feature using the Rancher UI or using YAML configuration. - -Below is a full example `rancher-pipeline.yml` for those who want to jump right in. - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${FIRST_KEY} && echo ${ALIAS_ENV} - # Set environment variables in container for the step - env: - FIRST_KEY: VALUE - SECOND_KEY: VALUE2 - # Set environment variables from project secrets - envFrom: - - sourceName: my-secret - sourceKey: secret-key - targetKey: ALIAS_ENV - - runScriptConfig: - image: busybox - shellScript: date -R - # Conditions for steps - when: - branch: [ master, dev ] - event: push - - name: Publish my image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . 
- tag: rancher/rancher:v2.0.0 - # Optionally push to remote registry - pushRemote: true - registry: reg.example.com - - name: Deploy some workloads - steps: - - applyYamlConfig: - path: ./deployment.yaml -# branch conditions for the pipeline -branch: - include: [ master, feature/*] - exclude: [ dev ] -# timeout in minutes -timeout: 30 -notification: - recipients: - - # Recipient - recipient: "#mychannel" - # ID of Notifier - notifier: "c-wdcsr:n-c9pg7" - - recipient: "test@example.com" - notifier: "c-wdcsr:n-lkrhd" - # Select which statuses you want the notification to be sent - condition: ["Failed", "Success", "Changed"] - # Ability to override the default message (Optional) - message: "my-message" -``` diff --git a/content/rancher/v2.5/en/cluster-explorer/pipelines/storage/_index.md b/content/rancher/v2.5/en/cluster-explorer/pipelines/storage/_index.md deleted file mode 100644 index 6fec0fa6ccb..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/pipelines/storage/_index.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: Configuring Persistent Data for Pipeline Components -weight: 600 ---- - -The internal [Docker registry](#how-pipelines-work) and the [Minio](#how-pipelines-work) workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. - -This section assumes that you understand how persistent storage works in Kubernetes. For more information, refer to the section on [how storage works.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/) - ->**Prerequisites (for both parts A and B):** -> ->[Persistent volumes]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) must be available for the cluster. - -### A. 
Configuring Persistent Data for Docker Registry - -1. From the project that you're configuring a pipeline for, and click **Resources > Workloads.** In versions prior to v2.3.0, select the **Workloads** tab. - -1. Find the `docker-registry` workload and select **⋮ > Edit**. - -1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: - - - **Add Volume > Add a new persistent volume (claim)** - - **Add Volume > Use an existing persistent volume (claim)** - -1. Complete the form that displays to choose a persistent volume for the internal Docker registry. -{{% tabs %}} -{{% tab "Add a new persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Select a volume claim **Source**: - - - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. - - - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} - -{{% tab "Use an existing persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Choose a **Persistent Volume Claim** from the drop-down. - -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} - -{{% /tabs %}} - -1. From the **Mount Point** field, enter `/var/lib/registry`, which is the data storage path inside the Docker registry container. - -1. Click **Upgrade**. - -### B. Configuring Persistent Data for Minio - -1. From the project view, click **Resources > Workloads.** (In versions prior to v2.3.0, click the **Workloads** tab.) Find the `minio` workload and select **⋮ > Edit**. - -1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: - - - **Add Volume > Add a new persistent volume (claim)** - - **Add Volume > Use an existing persistent volume (claim)** - -1. Complete the form that displays to choose a persistent volume for the internal Docker registry. -{{% tabs %}} - -{{% tab "Add a new persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Select a volume claim **Source**: - - - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. - - - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} -{{% tab "Use an existing persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Choose a **Persistent Volume Claim** from the drop-down. - -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} -{{% /tabs %}} - -1. From the **Mount Point** field, enter `/data`, which is the data storage path inside the Minio container. - -1. Click **Upgrade**. - -**Result:** Persistent storage is configured for your pipeline components. diff --git a/content/rancher/v2.5/en/cluster-explorer/pod-security-policy/_index.md b/content/rancher/v2.5/en/cluster-explorer/pod-security-policy/_index.md deleted file mode 100644 index 261e1e11782..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/pod-security-policy/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Adding a Pod Security Policy -weight: 80 ---- - -> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) - -When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running. - -You can assign a pod security policy when you provision a cluster. However, if you need to relax or restrict security for your pods later, you can update the policy while editing your cluster. - -1. From the **Global** view, find the cluster to which you want to apply a pod security policy. Select **⋮ > Edit**. - -2. Expand **Cluster Options**. - -3. From **Pod Security Policy Support**, select **Enabled**. - - >**Note:** This option is only available for clusters [provisioned by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -4. 
From the **Default Pod Security Policy** drop-down, select the policy you want to apply to the cluster. - - Rancher ships with [policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) as well. - -5. Click **Save**. - -**Result:** The pod security policy is applied to the cluster and any projects within the cluster. - ->**Note:** Workloads already running before assignment of a pod security policy are grandfathered in. Even if they don't meet your pod security policy, workloads running before assignment of the policy continue to run. -> ->To check if a running workload passes your pod security policy, clone or upgrade it. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/project-admin/_index.md b/content/rancher/v2.5/en/cluster-explorer/project-admin/_index.md deleted file mode 100644 index 8b66f65ae47..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/project-admin/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Projects -weight: 2500 ---- - -_Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. - -In terms of hierarchy: - -- Clusters contain projects -- Projects contain namespaces - -Within Rancher, projects allow you to manage multiple namespaces as a single entity. In native Kubernetes, which does not include projects, features like role-based access rights or cluster resources are assigned to individual namespaces. In clusters where multiple namespaces require the same set of access rights, assigning these rights to each individual namespace can become tedious. 
Even though all namespaces require the same rights, there's no way to apply those rights to all of your namespaces in a single action. You'd have to repetitively assign these rights to each namespace! - -Rancher projects resolve this issue by allowing you to apply resources and access rights at the project level. Each namespace in the project then inherits these resources and policies, so you only have to assign them to the project once, rather than assigning them to each individual namespace. - -You can use projects to perform actions like: - -- [Assign users access to a group of namespaces]({{}}/rancher/v2.x/en/project-admin/project-members) -- Assign users [specific roles in a project]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/) -- [Set resource quotas]({{}}/rancher/v2.x/en/project-admin/resource-quotas/) -- [Manage namespaces]({{}}/rancher/v2.x/en/project-admin/namespaces/) -- [Configure tools]({{}}/rancher/v2.x/en/project-admin/tools/) -- [Set up pipelines for continuous integration and deployment]({{}}/rancher/v2.x/en/project-admin/pipelines) -- [Configure pod security policies]({{}}/rancher/v2.x/en/project-admin/pod-security-policies) - -### Authorization - -Non-administrative users are only authorized for project access after an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owner or member]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) adds them to the project's **Members** tab. - -Whoever creates the project automatically becomes a [project owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). 
- -## Switching between Projects - -To switch between projects, use the drop-down available in the navigation bar. Alternatively, you can switch between projects directly in the navigation bar. - -1. From the **Global** view, navigate to the project that you want to configure. - -1. Select **Projects/Namespaces** from the navigation bar. - -1. Select the link for the project that you want to open. diff --git a/content/rancher/v2.5/en/cluster-explorer/project-admin/namespaces/_index.md b/content/rancher/v2.5/en/cluster-explorer/project-admin/namespaces/_index.md deleted file mode 100644 index 82b308daf17..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/project-admin/namespaces/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Namespaces -weight: 2520 ---- - -Within Rancher, you can further divide projects into different [namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), which are virtual clusters within a project backed by a physical cluster. Should you require another level of organization beyond projects and the `default` namespace, you can use multiple namespaces to isolate applications and resources. - -Although you assign resources at the project level so that each namespace in the project can use them, you can override this inheritance by assigning resources explicitly to a namespace. 
- -Resources that you can assign directly to namespaces include: - -- [Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/) -- [Load Balancers/Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/) -- [Service Discovery Records]({{}}/rancher/v2.x/en/k8s-in-rancher/service-discovery/) -- [Persistent Volume Claims]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) -- [Certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/) -- [ConfigMaps]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/) -- [Registries]({{}}/rancher/v2.x/en/k8s-in-rancher/registries/) -- [Secrets]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/) - -To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. - -> **Note:** If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher]({{}}/rancher/v2.x/en/project-admin/namespaces/#creating-namespaces) to ensure that you will have permission to access the namespace. - - -### Creating Namespaces - -Create a new namespace to isolate apps and resources in a project. - ->**Tip:** When working with project resources that you can assign to a namespace (i.e., [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/), [certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/), [ConfigMaps]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps), etc.) you can create a namespace on the fly. - -1. From the **Global** view, open the project where you want to create a namespace. 
- - >**Tip:** As a best practice, we recommend creating namespaces from the project level. However, cluster owners and members can create them from the cluster level as well. - -1. From the main menu, select **Namespace**. The click **Add Namespace**. - -1. **Optional:** If your project has [Resource Quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) in effect, you can override the default resource **Limits** (which places a cap on the resources that the namespace can consume). - -1. Enter a **Name** and then click **Create**. - -**Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. - -### Moving Namespaces to Another Project - -Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. - -1. From the **Global** view, open the cluster that contains the namespace you want to move. - -1. From the main menu, select **Projects/Namespaces**. - -1. Select the namespace(s) that you want to move to a different project. Then click **Move**. You can move multiple namespaces at one. - - >**Notes:** - > - >- Don't move the namespaces in the `System` project. Moving these namespaces can adversely affect cluster networking. - >- You cannot move a namespace into a project that already has a [resource quota]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/) configured. - >- If you move a namespace from a project that has a quota set to a project with no quota set, the quota is removed from the namespace. - -1. Choose a new project for the new namespace and then click **Move**. Alternatively, you can remove the namespace from all projects by selecting **None**. - -**Result:** Your namespace is moved to a different project (or is unattached from all projects). 
If any project resources are attached to the namespace, the namespace releases them and then attaches resources from the new project.
The scope for alerts can be set at either the cluster or project level. - -## Logging - -Logging is helpful because it allows you to: - -- Capture and analyze the state of your cluster -- Look for trends in your environment -- Save your logs to a safe location outside of your cluster -- Stay informed of events like a container crashing, a pod eviction, or a node dying -- More easily debugg and troubleshoot problems - -Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd. - -For details, refer to the [logging section.]({{}}/rancher/v2.x/en/cluster-admin/tools/logging) - -## Monitoring - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. For details, refer to the [monitoring section.]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring) diff --git a/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/alerts/_index.md b/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/alerts/_index.md deleted file mode 100644 index 0baf954ee0a..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/alerts/_index.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: Project Alerts -weight: 2 ---- - -To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. - -Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 
Leveraging these tools, Rancher can notify [cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) and [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) of events they need to address. - -Before you can receive alerts, one or more [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) must be configured at the cluster level. - -Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can manage project alerts. - -This section covers the following topics: - -- [Alerts scope](#alerts-scope) -- [Default project-level alerts](#default-project-level-alerts) -- [Adding project alerts](#adding-project-alerts) -- [Managing project alerts](#managing-project-alerts) - -## Alerts Scope - -The scope for alerts can be set at either the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) or project level. - -At the project level, Rancher monitors specific deployments and sends alerts for: - -* Deployment availability -* Workloads status -* Pod status -* The Prometheus expression cross the thresholds - -## Default Project-level Alerts - -When you enable monitoring for the project, some project-level alerts are provided. You can receive these alerts if a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) for them is configured at the cluster level. - -| Alert | Explanation | -|-------|-------------| -| Less than half workload available | A critical alert is triggered if less than half of a workload is available, based on workloads where the key is `app` and the value is `workload`. 
| -| Memory usage close to the quota | A warning alert is triggered if the workload's memory usage exceeds the memory resource quota that is set for the workload. You can see the memory limit in the Rancher UI if you go to the workload under the **Security & Host Config** tab. | - -For information on other default alerts, refer to the section on [cluster-level alerts.]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts) - -## Adding Project Alerts - ->**Prerequisite:** Before you can receive project alerts, you must add a notifier. - -1. From the **Global** view, navigate to the project that you want to configure project alerts for. Select **Tools > Alerts**. In versions prior to v2.2.0, you can choose **Resources > Alerts**. - -1. Click **Add Alert Group**. - -1. Enter a **Name** for the alert that describes its purpose, you could group alert rules for the different purpose. - -1. Based on the type of alert you want to create, complete one of the instruction subsets below. - -{{% accordion id="pod" label="Pod Alerts" %}} -This alert type monitors for the status of a specific pod. - -1. Select the **Pod** option, and then select a pod from the drop-down. -1. Select a pod status that triggers an alert: - - - **Not Running** - - **Not Scheduled** - - **Restarted `` times with the last `` Minutes** - -1. Select the urgency level of the alert. The options are: - - - **Critical**: Most urgent - - **Warning**: Normal urgency - - **Info**: Least urgent - - Select the urgency level of the alert based on pod state. For example, select **Info** for Job pod which stop running after job finished. However, if an important pod isn't scheduled, it may affect operations, so choose **Critical**. - -1. Configure advanced options. By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. 
- - - **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. - - **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. - - **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. - -{{% /accordion %}} -{{% accordion id="workload" label="Workload Alerts" %}} -This alert type monitors for the availability of a workload. - -1. Choose the **Workload** option. Then choose a workload from the drop-down. - -1. Choose an availability percentage using the slider. The alert is triggered when the workload's availability on your cluster nodes drops below the set percentage. - -1. Select the urgency level of the alert. - - - **Critical**: Most urgent - - **Warning**: Normal urgency - - **Info**: Least urgent - - Select the urgency level of the alert based on the percentage you choose and the importance of the workload. - -1. Configure advanced options. By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - - - **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. - - **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. - - **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. - -{{% /accordion %}} -{{% accordion id="workload-selector" label="Workload Selector Alerts" %}} -This alert type monitors for the availability of all workloads marked with tags that you've specified. - -1. 
Select the **Workload Selector** option, and then click **Add Selector** to enter the key value pair for a label. If one of the workloads drops below your specifications, an alert is triggered. This label should be applied to one or more of your workloads. - -1. Select the urgency level of the alert. - - - **Critical**: Most urgent - - **Warning**: Normal urgency - - **Info**: Least urgent - - Select the urgency level of the alert based on the percentage you choose and the importance of the workload. - -1. Configure advanced options. By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - - - **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. - - **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. - - **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. - -{{% /accordion %}} -{{% accordion id="project-expression" label="Metric Expression Alerts" %}} -
- -If you enable [project monitoring]({{}}/rancher/v2.x/en/project-admin/tools/#monitoring), this alert type monitors for the overload from Prometheus expression querying. - -1. Input or select an **Expression**, the drop down shows the original metrics from Prometheus, including: - - - [**Container**](https://github.com/google/cadvisor) - - [**Kubernetes Resources**](https://github.com/kubernetes/kube-state-metrics) - - [**Customize**]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#project-metrics) - - [**Project Level Grafana**](http://docs.grafana.org/administration/metrics/) - - **Project Level Prometheus** - -1. Choose a comparison. - - - **Equal**: Trigger alert when expression value equal to the threshold. - - **Not Equal**: Trigger alert when expression value not equal to the threshold. - - **Greater Than**: Trigger alert when expression value greater than to threshold. - - **Less Than**: Trigger alert when expression value equal or less than the threshold. - - **Greater or Equal**: Trigger alert when expression value greater to equal to the threshold. - - **Less or Equal**: Trigger alert when expression value less or equal to the threshold. - -1. Input a **Threshold**, for trigger alert when the value of expression cross the threshold. - -1. Choose a **Comparison**. - -1. Select a **Duration**, for trigger alert when expression value crosses the threshold longer than the configured duration. - -1. Select the urgency level of the alert. - - - **Critical**: Most urgent - - **Warning**: Normal urgency - - **Info**: Least urgent -
-
Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when an expression for container memory usage rises above 60% of the limit might warrant an urgency of **Info**, while one that rises above 95% warrants an urgency of **Critical**.
-{{% /accordion %}} - -1. Continue adding more **Alert Rule** to the group. - -1. Finally, choose the [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) that send you alerts. - - - You can set up multiple notifiers. - - You can change notifier recipients on the fly. - -**Result:** Your alert is configured. A notification is sent when the alert is triggered. - -## Managing Project Alerts - -To manage project alerts, browse to the project that alerts you want to manage. Then select **Tools > Alerts**. In versions prior to v2.2.0, you can choose **Resources > Alerts**. You can: - -- Deactivate/Reactive alerts -- Edit alert settings -- Delete unnecessary alerts -- Mute firing alerts -- Unmute muted alerts diff --git a/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/istio/_index.md b/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/istio/_index.md deleted file mode 100644 index b64b9a7ae1d..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/istio/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Istio in Projects -weight: 1 ---- - -Using Rancher, you can connect, secure, control, and observe services through integration with [Istio](https://istio.io/), a leading open-source service mesh solution. Istio provides behavioral insights and operational control over the service mesh as a whole, offering a complete solution to satisfy the diverse requirements of microservice applications. 
- -This service mesh provides features that include but are not limited to the following: - -- Traffic management features -- Enhanced monitoring and tracing -- Service discovery and routing -- Secure connections and service-to-service authentication with mutual TLS -- Load balancing -- Automatic retries, backoff, and circuit breaking - -Istio needs to be set up by a Rancher administrator or cluster administrator before it can be used in a project for [comprehensive data visualizations,]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/#accessing-visualizations) traffic management, or any of its other features. - -For information on how Istio is integrated with Rancher and how to set it up, refer to the [section about Istio.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio) \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/logging/_index.md b/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/logging/_index.md deleted file mode 100644 index 9488490ab40..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/logging/_index.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Project Logging -weight: 3 ---- - -Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. - -For background information about how logging integrations work, refer to the [cluster administration section.]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/#how-logging-integrations-work) - -Rancher supports the following services: - -- Elasticsearch -- Splunk -- Kafka -- Syslog -- Fluentd - ->**Note:** You can only configure one logging service per cluster or per project. 
- -Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure Rancher to send Kubernetes logs to a logging service. - -## Requirements - -The Docker daemon on each node in the cluster should be [configured](https://docs.docker.com/config/containers/logging/configure/) with the (default) log-driver: `json-file`. You can check the log-driver by running the following command: - -``` -$ docker info | grep 'Logging Driver' -Logging Driver: json-file -``` - -## Advantages - -Setting up a logging service to collect logs from your cluster/project has several advantages: - -- Logs errors and warnings in your Kubernetes infrastructure to a stream. The stream informs you of events like a container crashing, a pod eviction, or a node dying. -- Allows you to capture and analyze the state of your cluster and look for trends in your environment using the log stream. -- Helps you when troubleshooting or debugging. -- Saves your logs to a safe location outside of your cluster, so that you can still access them even if your cluster encounters issues. - -## Logging Scope - -You can configure logging at either cluster level or project level. - -- [Cluster logging]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. - -- Project logging writes logs for every pod in that particular project. - -Logs that are sent to your logging service are from the following locations: - - - Pod logs stored at `/var/log/containers`. - - - Kubernetes system components logs stored at `/var/lib/rancher/rke/logs/`. - -## Enabling Project Logging - -1. 
From the **Global** view, navigate to the project for which you want to configure project logging.
- - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 2. If you are using a self-signed certificate, provide the **CA Certificate PEM**. - -1. (Optional) Complete the **Additional Logging Configuration** form. - - 1. **Optional:** Use the **Add Field** button to add custom log fields to your logging configuration. These fields are key value pairs (such as `foo=bar`) that you can use to filter the logs from another system. - - 1. Enter a **Flush Interval**. This value determines how often [Fluentd](https://www.fluentd.org/) flushes data to the logging server. Intervals are measured in seconds. - - 1. **Include System Log**. The logs from pods in system project and RKE components will be sent to the target. Uncheck it to exclude the system logs. - -1. Click **Test**. Rancher sends a test log to the service. - - > **Note:** This button is replaced with _Dry Run_ if you are using the custom configuration editor. In this case, Rancher calls the fluentd dry run command to validate the configuration. - -1. Click **Save**. - -**Result:** Rancher is now configured to send logs to the selected service. Log into the logging service so that you can start viewing the logs. 
- -## Related Links - -[Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/) diff --git a/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/monitoring/_index.md b/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/monitoring/_index.md deleted file mode 100644 index d87d48f296c..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/project-admin/project-features/monitoring/_index.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Project Monitoring -weight: 4 ---- - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. - -> For more information about how Prometheus works, refer to the [cluster administration section.]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#about-prometheus) - -This section covers the following topics: - -- [Monitoring scope](#monitoring-scope) -- [Permissions to configure project monitoring](#permissions-to-configure-project-monitoring) -- [Enabling project monitoring](#enabling-project-monitoring) -- [Project-level monitoring resource requirements](#project-level-monitoring-resource-requirements) -- [Project metrics](#project-metrics) - -### Monitoring Scope - -Using Prometheus, you can monitor Rancher at both the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and project level. For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. - -- [Cluster monitoring]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. 
- - - [Kubernetes control plane]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#kubernetes-components-metrics) - - [etcd database]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#etcd-metrics) - - [All nodes (including workers)]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#cluster-metrics) - -- Project monitoring allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. - -### Permissions to Configure Project Monitoring - -Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure project level monitoring. Project members can only view monitoring metrics. - -### Enabling Project Monitoring - -> **Prerequisite:** Cluster monitoring must be [enabled.]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) - -1. Go to the project where monitoring should be enabled. Note: When cluster monitoring is enabled, monitoring is also enabled by default in the **System** project. - -1. Select **Tools > Monitoring** in the navigation bar. - -1. Select **Enable** to show the [Prometheus configuration options]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/prometheus/). Enter in your desired configuration options. - -1. Click **Save**. 
- -### Project-Level Monitoring Resource Requirements - -Container| CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable ----------|---------------|---------------|-------------|-------------|------------- -Prometheus|750m| 750Mi | 1000m | 1000Mi | Yes -Grafana | 100m | 100Mi | 200m | 200Mi | No - - -**Result:** A single application,`project-monitoring`, is added as an [application]({{}}/rancher/v2.x/en/catalog/apps/) to the project. After the application is `active`, you can start viewing [project metrics](#project-metrics) through the [Rancher dashboard]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#rancher-dashboard) or directly from [Grafana]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#grafana). - -> The default username and password for the Grafana instance will be `admin/admin`. However, Grafana dashboards are served via the Rancher authentication proxy, so only users who are currently authenticated into the Rancher server have access to the Grafana dashboard. - -### Project Metrics - -[Workload metrics]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#workload-metrics) are available for the project if monitoring is enabled at the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and at the [project level.](#enabling-project-monitoring) - -You can monitor custom metrics from any [exporters.](https://prometheus.io/docs/instrumenting/exporters/) You can also expose some custom endpoints on deployments without needing to configure Prometheus for your project. - -> **Example:** -> A [Redis](https://redis.io/) application is deployed in the namespace `redis-app` in the project `Datacenter`. It is monitored via [Redis exporter](https://github.com/oliver006/redis_exporter). After enabling project monitoring, you can edit the application to configure the Advanced Options -> Custom Metrics section. Enter the `Container Port` and `Path` and select the `Protocol`. 
- -To access a project-level Grafana instance, - -1. From the **Global** view, navigate to a cluster that has monitoring enabled. - -1. Go to a project that has monitoring enabled. - -1. From the project view, click **Apps.** In versions prior to v2.2.0, choose **Catalog Apps** on the main navigation bar. - -1. Go to the `project-monitoring` application. - -1. In the `project-monitoring` application, there are two `/index.html` links: one that leads to a Grafana instance and one that leads to a Prometheus instance. When you click the Grafana link, it will redirect you to a new webpage for Grafana, which shows metrics for the cluster. - -1. You will be signed in to the Grafana instance automatically. The default username is `admin` and the default password is `admin`. For security, we recommend that you log out of Grafana, log back in with the `admin` password, and change your password. - -**Results:** You will be logged into Grafana from the Grafana instance. After logging in, you can view the preset Grafana dashboards, which are imported via the [Grafana provisioning mechanism](http://docs.grafana.org/administration/provisioning/#dashboards), so you cannot modify them directly. For now, if you want to configure your own dashboards, clone the original and modify the new copy. diff --git a/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/_index.md b/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/_index.md deleted file mode 100644 index 0001609eeb5..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Project Resource Quotas -weight: 3 ---- - -In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. 
To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. - -This page is a how-to guide for creating resource quotas in existing projects. - -Resource quotas can also be set when a new project is created. For details, refer to the section on [creating new projects.]({{}}/rancher/v2.x/en/cluster-admin/projects-and-namespaces/#creating-projects) - -> Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects). For details on how resource quotas work with projects in Rancher, refer to [this page.](./quotas-for-projects) - -### Applying Resource Quotas to Existing Projects - -Edit [resource quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) when: - -- You want to limit the resources that a project and its namespaces can use. -- You want to scale the resources available to a project up or down when a research quota is already in effect. - -1. From the **Global** view, open the cluster containing the project to which you want to apply a resource quota. - -1. From the main menu, select **Projects/Namespaces**. - -1. Find the project that you want to add a resource quota to. From that project, select **⋮ > Edit**. - -1. Expand **Resource Quotas** and click **Add Quota**. Alternatively, you can edit existing quotas. - -1. Select a [Resource Type]({{}}/rancher/v2.x/en/project-admin/resource-quotas/#resource-quota-types). - -1. Enter values for the **Project Limit** and the **Namespace Default Limit**. 
- - | Field | Description | - | ----------------------- | -------------------------------------------------------------------------------------------------------- | - | Project Limit | The overall resource limit for the project. | - | Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project. The combined limit of all project namespaces shouldn't exceed the project limit. | - -1. **Optional:** Add more quotas. - -1. Click **Create**. - -**Result:** The resource quota is applied to your project and namespaces. When you add more namespaces in the future, Rancher validates that the project can accommodate the namespace. If the project can't allocate the resources, Rancher won't let you save your changes. diff --git a/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/override-container-default/_index.md b/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/override-container-default/_index.md deleted file mode 100644 index dde40c80fab..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/override-container-default/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Setting Container Default Resource Limits -weight: 3 ---- - -When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. - -To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. 
- -### Editing the Container Default Resource Limit - -Edit [container default resource limit]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#setting-container-default-resource-limit) when: - -- You have a CPU or Memory resource quota set on a project, and want to supply the corresponding default values for a container. -- You want to edit the default container resource limit. - -1. From the **Global** view, open the cluster containing the project to which you want to edit the container default resource limit. -1. From the main menu, select **Projects/Namespaces**. -1. Find the project that you want to edit the container default resource limit. From that project, select **⋮ > Edit**. -1. Expand **Container Default Resource Limit** and edit the values. - -### Resource Limit Propagation - -When the default container resource limit is set at a project level, the parameter will be propagated to any namespace created in the project after the limit has been set. For any existing namespace in a project, this limit will not be automatically propagated. You will need to manually set the default container resource limit for any existing namespaces in the project in order for it to be used when creating any containers. - -> **Note:** Prior to v2.2.0, you could not launch catalog applications that did not have any limits set. With v2.2.0, you can set a default container resource limit on a project and launch any catalog applications. - -Once a container default resource limit is configured on a namespace, the default will be pre-populated for any containers created in that namespace. These limits/reservations can always be overridden during workload creation. 
- -### Container Resource Quota Types - -The following resource limits can be configured: - -| Resource Type | Description | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| CPU Limit | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the container.| -| CPU Reservation | The minimum amount of CPU (in millicores) guaranteed to the container. | -| Memory Limit | The maximum amount of memory (in bytes) allocated to the container. | -| Memory Reservation | The minimum amount of memory (in bytes) guaranteed to the container. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/override-namespace-default/_index.md b/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/override-namespace-default/_index.md deleted file mode 100644 index f09feb9e83c..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/override-namespace-default/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Overriding the Default Limit for a Namespace -weight: 2 ---- - -Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. - -In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. 
Therefore, the administrator [raises the namespace limits]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) for `Namespace 3` so that the namespace can access more resources. - -Namespace Default Limit Override -![Namespace Default Limit Override]({{}}/img/rancher/rancher-resource-quota-override.svg) - -How to: [Editing Namespace Resource Quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) - -### Editing Namespace Resource Quotas - -If there is a [resource quota]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. - -1. From the **Global** view, open the cluster that contains the namespace for which you want to edit the resource quota. - -1. From the main menu, select **Projects/Namespaces**. - -1. Find the namespace for which you want to edit the resource quota. Select **⋮ > Edit**. - -1. Edit the Resource Quota **Limits**. These limits determine the resources available to the namespace. The limits must be set within the configured project limits. - - For more information about each **Resource Type**, see [Resource Quota Types]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#resource-quota-types). - - >**Note:** - > - >- If a resource quota is not configured for the project, these options will not be available. - >- If you enter limits that exceed the configured project limits, Rancher will not let you save your edits. - -**Result:** Your override is applied to the namespace's resource quota. 
diff --git a/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/quota-type-reference/_index.md b/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/quota-type-reference/_index.md deleted file mode 100644 index e671a9afdb1..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/quota-type-reference/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Resource Quota Type Reference -weight: 4 ---- - -When you create a resource quota, you are configuring the pool of resources available to the project. You can set the following resource limits for the following resource types. - -| Resource Type | Description | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| CPU Limit* | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the project/namespace.1 | -| CPU Reservation* | The minimum amount of CPU (in millicores) guaranteed to the project/namespace.1 | -| Memory Limit* | The maximum amount of memory (in bytes) allocated to the project/namespace.1 | -| Memory Reservation* | The minimum amount of memory (in bytes) guaranteed to the project/namespace.1 | -| Storage Reservation | The minimum amount of storage (in gigabytes) guaranteed to the project/namespace. | -| Services Load Balancers | The maximum number of load balancers services that can exist in the project/namespace. | -| Services Node Ports | The maximum number of node port services that can exist in the project/namespace. | -| Pods | The maximum number of pods that can exist in the project/namespace in a non-terminal state (i.e., pods with a state of `.status.phase in (Failed, Succeeded)` equal to true). 
| -| Services | The maximum number of services that can exist in the project/namespace. | -| ConfigMaps | The maximum number of ConfigMaps that can exist in the project/namespace. | -| Persistent Volume Claims | The maximum number of persistent volume claims that can exist in the project/namespace. | -| Replications Controllers | The maximum number of replication controllers that can exist in the project/namespace. | -| Secrets | The maximum number of secrets that can exist in the project/namespace. | - ->***** When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. As of v2.2.0, a [container default resource limit](#setting-container-default-resource-limit) can be set at the same time to avoid the need to explicitly set these limits for every workload. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/quotas-for-projects/_index.md b/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/quotas-for-projects/_index.md deleted file mode 100644 index 60f8c59d91a..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/project-admin/resource-quotas/quotas-for-projects/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: How Resource Quotas Work in Rancher Projects -weight: 1 ---- - -Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects). 
- -In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. - -In the following diagram, a Kubernetes administrator is trying to enforce a resource quota without Rancher. The administrator wants to apply a resource quota that sets the same CPU and memory limit to every namespace in his cluster (`Namespace 1-4`) . However, in the base version of Kubernetes, each namespace requires a unique resource quota. The administrator has to create four different resource quotas that have the same specs configured (`Resource Quota 1-4`) and apply them individually. - -Base Kubernetes: Unique Resource Quotas Being Applied to Each Namespace -![Native Kubernetes Resource Quota Implementation]({{}}/img/rancher/kubernetes-resource-quota.svg) - -Resource quotas are a little different in Rancher. In Rancher, you apply a resource quota to the [project]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects), and then the quota propagates to each namespace, whereafter Kubernetes enforces your limits using the native version of resource quotas. If you want to change the quota for a specific namespace, you can [override it](#overriding-the-default-limit-for-a-namespace). - -The resource quota includes two limits, which you set while creating or editing a project: - - -- **Project Limits:** - - This set of values configures an overall resource limit for the project. If you try to add a new namespace to the project, Rancher uses the limits you've set to validate that the project has enough resources to accommodate the namespace. In other words, if you try to move a namespace into a project near its resource quota, Rancher blocks you from moving the namespace. - -- **Namespace Default Limits:** - - This value is the default resource limit available for each namespace. 
When the resource quota is created at the project level, this limit is automatically propagated to each namespace in the project. Each namespace is bound to this default limit unless you [override it](#namespace-default-limit-overrides). - -In the following diagram, a Rancher administrator wants to apply a resource quota that sets the same CPU and memory limit for every namespace in their project (`Namespace 1-4`). However, in Rancher, the administrator can set a resource quota for the project (`Project Resource Quota`) rather than individual namespaces. This quota includes resource limits for both the entire project (`Project Limit`) and individual namespaces (`Namespace Default Limit`). Rancher then propagates the `Namespace Default Limit` quotas to each namespace (`Namespace Resource Quota`) when created. - -Rancher: Resource Quotas Propagating to Each Namespace -![Rancher Resource Quota Implementation]({{}}/img/rancher/rancher-resource-quota.svg) - -Let's highlight some more nuanced functionality. If a quota is deleted at the project level, it will also be removed from all namespaces contained within that project, despite any overrides that may exist. Further, updating an existing namespace default limit for a quota at the project level will not result in that value being propagated to existing namespaces in the project; the updated value will only be applied to newly created namespaces in that project. To update a namespace default limit for existing namespaces you can delete and subsequently recreate the quota at the project level with the new default value. This will result in the new default value being applied to all existing namespaces in the project. - -The following table explains the key differences between the two quota types. - -| Rancher Resource Quotas | Kubernetes Resource Quotas | -| ---------------------------------------------------------- | -------------------------------------------------------- | -| Applies to projects and namespace. 
| Applies to namespaces only. | -| Creates resource pool for all namespaces in project. | Applies static resource limits to individual namespaces. | -| Applies resource quotas to namespaces through propagation. | Applies only to the assigned namespace. diff --git a/content/rancher/v2.5/en/cluster-explorer/registries/_index.md b/content/rancher/v2.5/en/cluster-explorer/registries/_index.md deleted file mode 100644 index 16429e23910..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/registries/_index.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Kubernetes Registry and Docker Registry -description: Learn about the Docker registry and Kubernetes registry, their use cases and how to use a private registry with the Rancher UI -weight: 6 ---- -Registries are Kubernetes secrets containing credentials used to authenticate with [private Docker registries](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). - -The word "registry" can mean two things, depending on whether it is used to refer to a Docker or Kubernetes registry: - -- A **Docker registry** contains Docker images that you can pull in order to use them in your deployment. The registry is a stateless, scalable server side application that stores and lets you distribute Docker images. -- The **Kubernetes registry** is an image pull secret that your deployment uses to authenticate with a Docker registry. - -Deployments use the Kubernetes registry secret to authenticate with a private Docker registry and then pull a Docker image hosted on it. - -Currently, deployments pull the private registry credentials automatically only if the workload is created in the Rancher UI and not when it is created via kubectl. - -# Creating a Registry - ->**Prerequisites:** You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use. - -1. From the **Global** view, select the project containing the namespace(s) where you want to add a registry. - -1. 
From the main menu, click **Resources > Secrets > Registry Credentials.** (For Rancher prior to v2.3, click **Resources > Registries.)** - -1. Click **Add Registry.** - -1. Enter a **Name** for the registry. - - >**Note:** Kubernetes classifies secrets, certificates, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your registry must have a unique name among all secrets within your workspace. - -1. Select a **Scope** for the registry. You can either make the registry available for the entire project or a single [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). - -1. Select the website that hosts your private registry. Then enter credentials that authenticate with the registry. For example, if you use DockerHub, provide your DockerHub username and password. - -1. Click **Save**. - -**Result:** - -- Your secret is added to the project or namespace, depending on the scope you chose. -- You can view the secret in the Rancher UI from the **Resources > Registries** view. -- Any workload that you create in the Rancher UI will have the credentials to access the registry if the workload is within the registry's scope. - -# Using a Private Registry - -You can deploy a workload with an image from a private registry through the Rancher UI, or with `kubectl`. - -### Using the Private Registry with the Rancher UI - -To deploy a workload with an image from your private registry, - -1. Go to the project view, -1. Click **Resources > Workloads.** In versions prior to v2.3.0, go to the **Workloads** tab. -1. Click **Deploy.** -1. Enter a unique name for the workload and choose a namespace. -1. In the **Docker Image** field, enter the URL of the path to the Docker image in your private registry. For example, if your private registry is on Quay.io, you could use `quay.io//`. -1. 
Click **Launch.** - -**Result:** Your deployment should launch, authenticate using the private registry credentials you added in the Rancher UI, and pull the Docker image that you specified. - -### Using the Private Registry with kubectl - -When you create the workload using `kubectl`, you need to configure the pod so that its YAML has the path to the image in the private registry. You also have to create and reference the registry secret because the pod only automatically gets access to the private registry credentials if it is created in the Rancher UI. - -The secret has to be created in the same namespace where the workload gets deployed. - -Below is an example `pod.yml` for a workload that uses an image from a private registry. In this example, the pod uses an image from Quay.io, and the .yml specifies the path to the image. The pod authenticates with the registry using credentials stored in a Kubernetes secret called `testquay`, which is specified in `spec.imagePullSecrets` in the `name` field: - -``` -apiVersion: v1 -kind: Pod -metadata: - name: private-reg -spec: - containers: - - name: private-reg-container - image: quay.io// - imagePullSecrets: - - name: testquay -``` - -In this example, the secret named `testquay` is in the default namespace. - -You can use `kubectl` to create the secret with the private registry credentials. 
This command creates the secret named `testquay`: - -``` -kubectl create secret docker-registry testquay \ - --docker-server=quay.io \ - --docker-username= \ - --docker-password= -``` - -To see how the secret is stored in Kubernetes, you can use this command: - -``` -kubectl get secret testquay --output="jsonpath={.data.\.dockerconfigjson}" | base64 --decode -``` - -The result looks like this: - -``` -{"auths":{"quay.io":{"username":"","password":"","auth":"c291bXlhbGo6dGVzdGFiYzEyMw=="}}} -``` - -After the workload is deployed, you can check if the image was pulled successfully: - -``` -kubectl get events -``` -The result should look like this: -``` -14s Normal Scheduled Pod Successfully assigned default/private-reg2 to minikube -11s Normal Pulling Pod pulling image "quay.io//" -10s Normal Pulled Pod Successfully pulled image "quay.io//" -``` - -For more information, refer to the Kubernetes documentation on [creating a pod that uses your secret.](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) diff --git a/content/rancher/v2.5/en/cluster-explorer/secrets/_index.md b/content/rancher/v2.5/en/cluster-explorer/secrets/_index.md deleted file mode 100644 index 30ea15c7c49..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/secrets/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Secrets -weight: 4 ---- - -[Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets) store sensitive data like passwords, tokens, or keys. They may contain one or more key value pairs. - -> This page is about secrets in general. For details on setting up a private registry, refer to the section on [registries.]({{}}/rancher/v2.x/en/k8s-in-rancher/registries) - -When configuring a workload, you'll be able to choose which secrets to include. Like config maps, secrets can be referenced by workloads as either an environment variable or a volume mount. 
- -Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) - -# Creating Secrets - -When creating a secret, you can make it available for any deployment within a project, or you can limit it to a single namespace. - -1. From the **Global** view, select the project containing the namespace(s) where you want to add a secret. - -2. From the main menu, select **Resources > Secrets**. Click **Add Secret**. - -3. Enter a **Name** for the secret. - - >**Note:** Kubernetes classifies secrets, certificates, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your secret must have a unique name among all secrets within your workspace. - -4. Select a **Scope** for the secret. You can either make the registry available for the entire project or a single [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). - -5. From **Secret Values**, click **Add Secret Value** to add a key value pair. Add as many values as you need. - - >**Tip:** You can add multiple key value pairs to the secret by copying and pasting. - > - > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} - -1. Click **Save**. - -**Result:** Your secret is added to the project or namespace, depending on the scope you chose. You can view the secret in the Rancher UI from the **Resources > Secrets** view. - -Mounted secrets will be updated automatically unless they are mounted as subpath volumes. 
For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) - -# What's Next? - -Now that you have a secret added to the project or namespace, you can add it to a workload that you deploy. - -For more information on adding secret to a workload, see [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). diff --git a/content/rancher/v2.5/en/cluster-explorer/service-discovery/_index.md b/content/rancher/v2.5/en/cluster-explorer/service-discovery/_index.md deleted file mode 100644 index 721ce42d9be..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/service-discovery/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Service Discovery -weight: 2 ---- - -For every workload created, a complementing Service Discovery entry is created. This Service Discovery entry enables DNS resolution for the workload's pods using the following naming convention: -`..svc.cluster.local`. - -However, you also have the option of creating additional Service Discovery records. You can use these additional records so that a given [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) resolves with one or more external IP addresses, an external hostname, an alias to another DNS record, other workloads, or a set of pods that match a selector that you create. - -1. From the **Global** view, open the project that you want to add a DNS record to. - -1. Click **Resources** in the main navigation bar. Click the **Service Discovery** tab. (In versions prior to v2.3.0, just click the **Service Discovery** tab.) Then click **Add Record**. - -1. Enter a **Name** for the DNS record. This name is used for DNS resolution. - -1. Select a **Namespace** from the drop-down list. Alternatively, you can create a new namespace on the fly by clicking **Add to a new namespace**. - -1. 
Select one of the **Resolves To** options to route requests to the DNS record. - - 1. **One or more external IP addresses** - - Enter an IP address in the **Target IP Addresses** field. Add more IP addresses by clicking **Add Target IP**. - - 1. **An external hostname** - - Enter a **Target Hostname**. - - 1. **Alias of another DNS record's value** - - Click **Add Target Record** and select another DNS record from the **Value** drop-down. - - 1. **One or more workloads** - - Click **Add Target Workload** and select another workload from the **Value** drop-down. - - 1. **The set of pods which match a selector** - - Enter key value pairs of [label selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) to create a record for all pods that match your parameters. - -1. Click **Create** - -**Result:** A new DNS record is created. - -- You can view the record by from the project's **Service Discovery** tab. -- When you visit the new DNS name for the new record that you created (`..svc.cluster.local`), it resolves the chosen namespace. - -## Related Links - -- [Adding entries to Pod /etc/hosts with HostAliases](https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/) diff --git a/content/rancher/v2.5/en/cluster-explorer/storage/_index.md b/content/rancher/v2.5/en/cluster-explorer/storage/_index.md deleted file mode 100644 index 5df888b5f05..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/storage/_index.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: "Storage" -description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" -weight: 17 ---- -When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. 
This storage practice allows you to maintain application data, even if the application's pod fails. - -The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](./how-storage-works) - -### Prerequisites - -To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. - -If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. - -For provisioning new storage with Rancher, the cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) - -For attaching existing persistent storage to a cluster, the cloud provider does not need to be enabled. - -### Setting up Existing Storage - -The overall workflow for setting up existing storage is as follows: - -1. Set up persistent storage in an infrastructure provider. -2. Add a persistent volume (PV) that refers to the persistent storage. -3. Add a persistent volume claim (PVC) that refers to the PV. -4. Mount the PVC as a volume in your workload. - -For details and prerequisites, refer to [this page.](./attaching-existing-storage) - -### Dynamically Provisioning New Storage in Rancher - -The overall workflow for provisioning new storage is as follows: - -1. Add a storage class and configure it to use your storage provider. -2. Add a persistent volume claim (PVC) that refers to the storage class. -3. Mount the PVC as a volume for your workload. 
- -For details and prerequisites, refer to [this page.](./provisioning-new-storage) - -### Provisioning Storage Examples - -We provide examples of how to provision storage with [NFS,](./examples/nfs) [vSphere,](./examples/vsphere) and [Amazon's EBS.](./examples/ebs) - -### GlusterFS Volumes - -In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. For details on preventing this from happening, refer to [this page.](./glusterfs-volumes) - -### iSCSI Volumes - -In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. For details on resolving this issue, refer to [this page.](./iscsi-volumes) - -### hostPath Volumes -Before you create a hostPath volume, you need to set up an [extra_bind]({{}}/rke/latest/en/config-options/services/services-extras/#extra-binds/) in your cluster configuration. This will mount the path as a volume in your kubelets, which can then be used for hostPath volumes in your workloads. - -### Related Links - -- [Kubernetes Documentation: Storage](https://kubernetes.io/docs/concepts/storage/) diff --git a/content/rancher/v2.5/en/cluster-explorer/storage/attaching-existing-storage/_index.md b/content/rancher/v2.5/en/cluster-explorer/storage/attaching-existing-storage/_index.md deleted file mode 100644 index c020ad8712a..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/storage/attaching-existing-storage/_index.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Setting up Existing Storage -weight: 3 ---- - -This section describes how to set up existing persistent storage for workloads in Rancher. - -> This section assumes that you understand the Kubernetes concepts of persistent volumes and persistent volume claims. 
For more information, refer to the section on [how storage works.](../how-storage-works) - -To set up storage, follow these steps: - -1. [Set up persistent storage in an infrastructure provider.](#1-set-up-persistent-storage-in-an-infrastructure-provider) -2. [Add a persistent volume that refers to the persistent storage.](#2-add-a-persistent-volume-that-refers-to-the-persistent-storage) -3. [Add a persistent volume claim that refers to the persistent volume.](#3-add-a-persistent-volume-claim-that-refers-to-the-persistent-volume) -4. [Mount the persistent volume claim as a volume in your workload.](#4-mount-the-persistent-storage-claim-as-a-volume-in-your-workload) - -### Prerequisites - -- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) -- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. - -### 1. Set up persistent storage in an infrastructure provider - -Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. - -The steps to set up a persistent storage device will differ based on your infrastructure. We provide examples of how to set up storage using [vSphere,](../examples/vsphere) [NFS,](../examples/nfs) or Amazon's [EBS.](../examples/ebs) - -### 2. Add a persistent volume that refers to the persistent storage - -These steps describe how to set up a persistent volume at the cluster level in Kubernetes. - -1. From the cluster view, select **Storage > Persistent Volumes**. - -1. Click **Add Volume**. - -1. Enter a **Name** for the persistent volume. - -1. Select the **Volume Plugin** for the disk type or service that you're using. 
When adding storage to a cluster that's hosted by a cloud provider, use the cloud provider's plug-in for cloud storage. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, you must use the `Amazon EBS Disk` volume plugin. - -1. Enter the **Capacity** of your volume in gigabytes. - -1. Complete the **Plugin Configuration** form. Each plugin type requires information specific to the vendor of disk type. For help regarding each plugin's form and the information that's required, refer to the plug-in's vendor documentation. - -1. Optional: In the **Customize** form, configure the [access modes.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) This options sets how many nodes can access the volume, along with the node read/write permissions. The [Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) includes a table that lists which access modes are supported by the plugins available. - -1. Optional: In the **Customize** form, configure the [mount options.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options) Each volume plugin allows you to specify additional command line options during the mounting process. Consult each plugin's vendor documentation for the mount options available. - -1. Click **Save**. - -**Result:** Your new persistent volume is created. - -### 3. Add a persistent volume claim that refers to the persistent volume - -These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. - -1. Go to the project containing a workload that you want to add a persistent volume claim to. - -1. Then click the **Volumes** tab and click **Add Volume**. (In versions prior to v2.3.0, click **Workloads** on the main navigation bar, then **Volumes.**) - -1. Enter a **Name** for the volume claim. - -1. 
Select the [Namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) of the workload that you want to add the persistent storage to. - -1. In the section called **Use an existing persistent volume,** go to the **Persistent Volume** drop-down and choose the persistent volume that you created. - -1. **Optional:** From **Customize**, select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. - -1. Click **Create.** - -**Result:** Your PVC is created. You can now attach it to any workload in the project. - -### 4. Mount the persistent volume claim as a volume in your workload - -Mount PVCs to stateful workloads so that your applications can store their data. - -You can mount PVCs during the deployment of a workload, or following workload creation. - -The following steps describe how to assign existing storage to a new workload that is a stateful set: - -1. From the **Project** view, go to the **Workloads** tab. -1. Click **Deploy.** -1. Enter a name for the workload. -1. Next to the **Workload Type** field, click **More Options.** -1. Click **Stateful set of 1 pod.** Optionally, configure the number of pods. -1. Choose the namespace where the workload will be deployed. -1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. -1. In the **Persistent Volume Claim** field, select the PVC that you created. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Launch.** - -**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. - -The following steps describe how to assign persistent storage to an existing workload: - -1. 
From the **Project** view, go to the **Workloads** tab. -1. Go to the workload that you want to add the persistent storage to. The workload type should be a stateful set. Click **⋮ > Edit.** -1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. -1. In the **Persistent Volume Claim** field, select the PVC that you created. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Save.** - -**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/storage/examples/_index.md b/content/rancher/v2.5/en/cluster-explorer/storage/examples/_index.md deleted file mode 100644 index 28f5a3dcebd..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/storage/examples/_index.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Provisioning Storage Examples -weight: 4 ---- - -Rancher supports persistent storage with a variety of volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether its a cloud-based solution from a service-provider or an on-prem solution that you manage yourself. 
- -For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: - -- [NFS]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/nfs/) -- [vSphere]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/vsphere/) diff --git a/content/rancher/v2.5/en/cluster-explorer/storage/examples/ebs/_index.md b/content/rancher/v2.5/en/cluster-explorer/storage/examples/ebs/_index.md deleted file mode 100644 index b854daf0ef4..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/storage/examples/ebs/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Creating Persistent Storage in Amazon's EBS -weight: 3053 ---- - -This section describes how to set up Amazon's Elastic Block Store in EC2. - -1. From the EC2 console, go to the **ELASTIC BLOCK STORE** section in the left panel and click **Volumes.** -1. Click **Create Volume.** -1. Optional: Configure the size of the volume or other options. The volume should be created in the same availability zone as the instance it will be attached to. -1. Click **Create Volume.** -1. Click **Close.** - -**Result:** Persistent storage has been created. - -For details on how to set up the newly created storage in Rancher, refer to the section on [setting up existing storage.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/) \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/storage/examples/nfs/_index.md b/content/rancher/v2.5/en/cluster-explorer/storage/examples/nfs/_index.md deleted file mode 100644 index df801948faa..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/storage/examples/nfs/_index.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: NFS Storage -weight: 3054 ---- - -Before you can use the NFS storage volume plug-in with Rancher deployments, you need to provision an NFS server. 
- ->**Note:** -> ->- If you already have an NFS share, you don't need to provision a new NFS server to use the NFS volume plugin within Rancher. Instead, skip the rest of this procedure and complete [adding storage]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/). -> ->- This procedure demonstrates how to set up an NFS server using Ubuntu, although you should be able to use these instructions for other Linux distros (e.g. Debian, RHEL, Arch Linux, etc.). For official instruction on how to create an NFS server using another Linux distro, consult the distro's documentation. - ->**Recommended:** To simplify the process of managing firewall rules, use NFSv4. - -1. Using a remote Terminal connection, log into the Ubuntu server that you intend to use for NFS storage. - -1. Enter the following command: - - ``` - sudo apt-get install nfs-kernel-server - ``` - -1. Enter the command below, which sets the directory used for storage, along with user access rights. Modify the command if you'd like to keep storage at a different directory. - - ``` - mkdir -p /nfs && chown nobody:nogroup /nfs - ``` - - The `-p /nfs` parameter creates a directory named `nfs` at root. - - The `chown nobody:nogroup /nfs` parameter allows all access to the storage directory. - -1. Create an NFS exports table. This table sets the directory paths on your NFS server that are exposed to the nodes that will use the server for storage. - - 1. Open `/etc/exports` using your text editor of choice. - 1. Add the path of the `/nfs` folder that you created in step 3, along with the IP addresses of your cluster nodes. Add an entry for each IP address in your cluster. Follow each address and its accompanying parameters with a single space that is a delimiter. - - ``` - /nfs (rw,sync,no_subtree_check) (rw,sync,no_subtree_check) (rw,sync,no_subtree_check) - ``` - - **Tip:** You can replace the IP addresses with a subnet. For example: `10.212.50.12/24` - - 1. 
Update the NFS table by entering the following command: - - ``` - exportfs -ra - ``` - -1. Open the ports used by NFS. - - 1. To find out what ports NFS is using, enter the following command: - - ``` - rpcinfo -p | grep nfs - ``` - 2. [Open the ports](https://help.ubuntu.com/lts/serverguide/firewall.html.en) that the previous command outputs. For example, the following command opens port 2049: - - ``` - sudo ufw allow 2049 - ``` - -**Result:** Your NFS server is configured to be used for storage with your Rancher nodes. - -## What's Next? - -Within Rancher, add the NFS server as a [storage volume]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#adding-a-persistent-volume) and/or [storage class]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#adding-storage-classes). After adding the server, you can use it for storage for your deployments. diff --git a/content/rancher/v2.5/en/cluster-explorer/storage/examples/vsphere/_index.md b/content/rancher/v2.5/en/cluster-explorer/storage/examples/vsphere/_index.md deleted file mode 100644 index 8a1e5b2a524..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/storage/examples/vsphere/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: vSphere Storage -weight: 3055 ---- - -To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume [storage class]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#storage-classes). This practice dynamically provisions vSphere storage when workloads request volumes through a [persistent volume claim]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/). 
- -### Prerequisites - -In order to provision vSphere volumes in a cluster created with the [Rancher Kubernetes Engine (RKE)]({{< baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the [vSphere cloud provider]({{}}/rke/latest/en/config-options/cloud-providers/vsphere) must be explicitly enabled in the [cluster options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/). - -### Creating A Storage Class - -> **Note:** -> -> The following steps can also be performed using the `kubectl` command line tool. See [Kubernetes documentation on persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) for details. - -1. From the Global view, open the cluster where you want to provide vSphere storage. -2. From the main menu, select **Storage > Storage Classes**. Then click **Add Class**. -3. Enter a **Name** for the class. -4. Under **Provisioner**, select **VMWare vSphere Volume**. - - {{< img "/img/rancher/vsphere-storage-class.png" "vsphere-storage-class">}} - -5. Optionally, specify additional properties for this storage class under **Parameters**. Refer to the [vSphere storage documentation](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/storageclass.html) for details. -5. Click **Save**. - -### Creating a Workload with a vSphere Volume - -1. From the cluster where you configured vSphere storage, begin creating a workload as you would in [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). -2. For **Workload Type**, select **Stateful set of 1 pod**. -3. Expand the **Volumes** section and click **Add Volume**. -4. Choose **Add a new persistent volume (claim)**. This option will implicitly create the claim once you deploy the workload. -5. Assign a **Name** for the claim, ie. `test-volume` and select the vSphere storage class created in the previous step. -6. Enter the required **Capacity** for the volume. Then click **Define**. 
- - {{< img "/img/rancher/workload-add-volume.png" "workload-add-volume">}} - -7. Assign a path in the **Mount Point** field. This is the full path where the volume will be mounted in the container file system, e.g. `/persistent`. -8. Click **Launch** to create the workload. - -### Verifying Persistence of the Volume - -1. From the context menu of the workload you just created, click **Execute Shell**. -2. Note the directory at root where the volume has been mounted to (in this case `/persistent`). -3. Create a file in the volume by executing the command `touch //data.txt`. -4. **Close** the shell window. -5. Click on the name of the workload to reveal detail information. -6. Open the context menu next to the Pod in the *Running* state. -7. Delete the Pod by selecting **Delete**. -8. Observe that the pod is deleted. Then a new pod is scheduled to replace it so that the workload maintains its configured scale of a single stateful pod. -9. Once the replacement pod is running, click **Execute Shell**. -10. Inspect the contents of the directory where the volume is mounted by entering `ls -l /`. Note that the file you created earlier is still present. - - ![workload-persistent-data]({{}}/img/rancher/workload-persistent-data.png) - -## Why to Use StatefulSets Instead of Deployments - -You should always use [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) for workloads consuming vSphere storage, as this resource type is designed to address a VMDK block storage caveat. - -Since vSphere volumes are backed by VMDK block storage, they only support an [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) of `ReadWriteOnce`. This setting restricts the volume so that it can only be mounted to a single pod at a time, unless all pods consuming that volume are co-located on the same node. 
This behavior makes a deployment resource unusable for scaling beyond a single replica if it consumes vSphere volumes. - -Even using a deployment resource with just a single replica may result in a deadlock situation while updating the deployment. If the updated pod is scheduled to a node different from where the existing pod lives, it will fail to start because the VMDK is still attached to the other node. - -## Related Links - -- [vSphere Storage for Kubernetes](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) -- [Kubernetes Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/content/rancher/v2.5/en/cluster-explorer/storage/glusterfs-volumes/_index.md b/content/rancher/v2.5/en/cluster-explorer/storage/glusterfs-volumes/_index.md deleted file mode 100644 index 99e06766cf8..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/storage/glusterfs-volumes/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: GlusterFS Volumes -weight: 5 ---- - -> This section only applies to [RKE clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) - -In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. The logging of the `kubelet` will show: `transport endpoint is not connected`. To prevent this from happening, you can configure your cluster to mount the `systemd-run` binary in the `kubelet` container. 
There are two requirements before you can change the cluster configuration: - -- The node needs to have the `systemd-run` binary installed (this can be checked by using the command `which systemd-run` on each cluster node) -- The `systemd-run` binary needs to be compatible with Debian OS on which the hyperkube image is based (this can be checked using the following command on each cluster node, replacing the image tag with the Kubernetes version you want to use) - -``` -docker run -v /usr/bin/systemd-run:/usr/bin/systemd-run --entrypoint /usr/bin/systemd-run rancher/hyperkube:v1.16.2-rancher1 --version -``` - ->**Note:** -> ->Before updating your Kubernetes YAML to mount the `systemd-run` binary, make sure the `systemd` package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. - -``` -services: - kubelet: - extra_binds: - - "/usr/bin/systemd-run:/usr/bin/systemd-run" -``` - -After the cluster has finished provisioning, you can check the `kubelet` container logging to see if the functionality is activated by looking for the following logline: - -``` -Detected OS with systemd -``` \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/storage/how-storage-works/_index.md b/content/rancher/v2.5/en/cluster-explorer/storage/how-storage-works/_index.md deleted file mode 100644 index 6e257fa85c3..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/storage/how-storage-works/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: How Persistent Storage Works -weight: 1 ---- - -A persistent volume (PV) is a piece of storage in the Kubernetes cluster, while a persistent volume claim (PVC) is a request for storage. 
- -There are two ways to use persistent storage in Kubernetes: - -- Use an existing persistent volume -- Dynamically provision new persistent volumes - -To use an existing PV, your application will need to use a PVC that is bound to a PV, and the PV should include the minimum resources that the PVC requires. - -For dynamic storage provisioning, your application will need to use a PVC that is bound to a storage class. The storage class contains the authorization to provision new persistent volumes. - -![Setting Up New and Existing Persistent Storage]({{}}/img/rancher/rancher-storage.svg) - -For more information, refer to the [official Kubernetes documentation on storage](https://kubernetes.io/docs/concepts/storage/volumes/) - -This section covers the following topics: - -- [About persistent volume claims](#about-persistent-volume-claims) - - [PVCs are required for both new and existing persistent storage](#pvcs-are-required-for-both-new-and-existing-persistent-storage) -- [Setting up existing storage with a PVC and PV](#setting-up-existing-storage-with-a-pvc-and-pv) - - [Binding PVs to PVCs](#binding-pvs-to-pvcs) -- [Provisioning new storage with a PVC and storage class](#provisioning-new-storage-with-a-pvc-and-storage-class) - -# About Persistent Volume Claims - -Persistent volume claims (PVCs) are objects that request storage resources from your cluster. They're similar to a voucher that your deployment can redeem for storage access. A PVC is mounted into a workloads as a volume so that the workload can claim its specified share of the persistent storage. - -To access persistent storage, a pod must have a PVC mounted as a volume. This PVC lets your deployment application store its data in an external location, so that if a pod fails, it can be replaced with a new pod and continue accessing its data stored externally, as though an outage never occurred. 
- -Each Rancher project contains a list of PVCs that you've created, available from **Resources > Workloads > Volumes.** (In versions prior to v2.3.0, the PVCs are in the **Volumes** tab.) You can reuse these PVCs when creating deployments in the future. - -### PVCs are Required for Both New and Existing Persistent Storage - -A PVC is required for pods to use any persistent storage, regardless of whether the workload is intended to use storage that already exists, or the workload will need to dynamically provision new storage on demand. - -If you are setting up existing storage for a workload, the workload mounts a PVC, which refers to a PV, which corresponds to existing storage infrastructure. - -If a workload should request new storage, the workload mounts PVC, which refers to a storage class, which has the capability to create a new PV along with its underlying storage infrastructure. - -Rancher lets you create as many PVCs within a project as you'd like. - -You can mount PVCs to a deployment as you create it, or later, after the deployment is running. - -# Setting up Existing Storage with a PVC and PV - -Your pods can store data in [volumes,](https://kubernetes.io/docs/concepts/storage/volumes/) but if the pod fails, that data is lost. To solve this issue, Kubernetes offers persistent volumes (PVs), which are Kubernetes resources that correspond to external storage disks or file systems that your pods can access. If a pod crashes, its replacement pod can access the data in persistent storage without any data loss. - -PVs can represent a physical disk or file system that you host on premise, or a vendor-hosted storage resource, such as Amazon EBS or Azure Disk. - -Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. 
- -> **Important:** PVs are created at the cluster level, which means that in a multi-tenant cluster, teams with access to separate namespaces could have access to the same PV. - -### Binding PVs to PVCs - -When pods are set up to use persistent storage, they mount a persistent volume claim (PVC) that is mounted the same way as any other Kubernetes volume. When each PVC is created, the Kubernetes master considers it to be a request for storage and binds it to a PV that matches the minimum resource requirements of the PVC. Not every PVC is guaranteed to be bound to a PV. According to the Kubernetes [documentation,](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - -> Claims will remain unbound indefinitely if a matching volume does not exist. Claims will be bound as matching volumes become available. For example, a cluster provisioned with many 50Gi PVs would not match a PVC requesting 100Gi. The PVC can be bound when a 100Gi PV is added to the cluster. - -In other words, you can create unlimited PVCs, but they will only be bound to PVs if the Kubernetes master can find a sufficient PVs that has at least the amount of disk space required by the PVC. - -To dynamically provision new storage, the PVC mounted in the pod would have to correspond to a storage class instead of a persistent volume. - -# Provisioning New Storage with a PVC and Storage Class - -Storage Classes allow you to create PVs dynamically without having to create persistent storage in an infrastructure provider first. - -For example, if a workload is bound to a PVC and the PVC refers to an Amazon EBS Storage Class, the storage class can dynamically create an EBS volume and a corresponding PV. - -The Kubernetes master will then bind the newly created PV to your workload's PVC, allowing your workload to use the persistent storage. 
- diff --git a/content/rancher/v2.5/en/cluster-explorer/storage/iscsi-volumes/_index.md b/content/rancher/v2.5/en/cluster-explorer/storage/iscsi-volumes/_index.md deleted file mode 100644 index 993e54c5c4a..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/storage/iscsi-volumes/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: iSCSI Volumes -weight: 6 ---- - -In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. - -Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. - -If you encounter this issue, you can work around it by installing the initiator tool on each node in your cluster. You can install the iSCSI initiator tool by logging into your cluster nodes and entering one of the following commands: - -| Platform | Package Name | Install Command | -| ------------- | ----------------------- | -------------------------------------- | -| Ubuntu/Debian | `open-iscsi` | `sudo apt install open-iscsi` | -| RHEL | `iscsi-initiator-utils` | `yum install iscsi-initiator-utils -y` | - - -After installing the initiator tool on your nodes, edit the YAML for your cluster, editing the kubelet configuration to mount the iSCSI binary and configuration, as shown in the sample below. 
- ->**Note:** -> ->Before updating your Kubernetes YAML to mount the iSCSI binary and configuration, make sure either the `open-iscsi` (deb) or `iscsi-initiator-utils` (yum) package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. - -``` -services: - kubelet: - extra_binds: - - "/etc/iscsi:/etc/iscsi" - - "/sbin/iscsiadm:/sbin/iscsiadm" -``` diff --git a/content/rancher/v2.5/en/cluster-explorer/storage/provisioning-new-storage/_index.md b/content/rancher/v2.5/en/cluster-explorer/storage/provisioning-new-storage/_index.md deleted file mode 100644 index 50f33cce160..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/storage/provisioning-new-storage/_index.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: Dynamically Provisioning New Storage in Rancher -weight: 2 ---- - -This section describes how to provision new persistent storage for workloads in Rancher. - -> This section assumes that you understand the Kubernetes concepts of storage classes and persistent volume claims. For more information, refer to the section on [how storage works.](../how-storage-works) - -To provision new storage for your workloads, follow these steps: - -1. [Add a storage class and configure it to use your storage provider.](#1-add-a-storage-class-and-configure-it-to-use-your-storage-provider) -2. [Add a persistent volume claim that refers to the storage class.](#2-add-a-persistent-volume-claim-that-refers-to-the-storage-class) -3. [Mount the persistent volume claim as a volume for your workload.](#3-mount-the-persistent-volume-claim-as-a-volume-for-your-workload) - -### Prerequisites - -- To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. 
-- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. -- The cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) -- Make sure your storage provisioner is available to be enabled. - -The following storage provisioners are enabled by default: - -Name | Plugin ---------|---------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` -Network File System | `nfs` -hostPath | `host-path` - -To use a storage provisioner that is not on the above list, you will need to use a [feature flag to enable unsupported storage drivers.]({{}}/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers/) - -### 1. Add a storage class and configure it to use your storage provider - -These steps describe how to set up a storage class at the cluster level. - -1. Go to the cluster for which you want to dynamically provision persistent storage volumes. - -1. From the cluster view, select `Storage > Storage Classes`. Click `Add Class`. - -1. Enter a `Name` for your storage class. - -1. From the `Provisioner` drop-down, select the service that you want to use to dynamically provision storage volumes. For example, if you have an Amazon EC2 cluster and you want to use cloud storage for it, use the `Amazon EBS Disk` provisioner. - -1. From the `Parameters` section, fill out the information required for the service to dynamically provision storage volumes. Each provisioner requires different information to dynamically provision storage volumes. Consult the service's documentation for help on how to obtain this information. - -1. Click `Save`. - -**Result:** The storage class is available to be consumed by a PVC. 
- -For full information about the storage class parameters, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters). - -### 2. Add a persistent volume claim that refers to the storage class - -These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. - -1. Go to the project containing a workload that you want to add a PVC to. - -1. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Then select the **Volumes** tab. Click **Add Volume**. - -1. Enter a **Name** for the volume claim. - -1. Select the [Namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) of the volume claim. - -1. In the **Source** field, click **Use a Storage Class to provision a new persistent volume.** - -1. Go to the **Storage Class** drop-down and select the storage class that you created. - -1. Enter a volume **Capacity**. - -1. Optional: Expand the **Customize** section and select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. - -1. Click **Create.** - -**Result:** Your PVC is created. You can now attach it to any workload in the project. - -### 3. Mount the persistent volume claim as a volume for your workload - -Mount PVCs to workloads so that your applications can store their data. - -You can mount PVCs during the deployment of a workload, or following workload creation. - -To attach the PVC to a new workload, - -1. Create a workload as you would in [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). -1. For **Workload Type**, select **Stateful set of 1 pod**. -1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** -1. 
In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Launch.** - -**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. - -To attach the PVC to an existing workload, - -1. Go to the project that has the workload that will have the PVC attached. -1. Go to the workload that will have persistent storage and click **⋮ > Edit.** -1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** -1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Save.** - -**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. If not, Rancher will provision new persistent storage. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/workloads/_index.md b/content/rancher/v2.5/en/cluster-explorer/workloads/_index.md deleted file mode 100644 index 648c628f352..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/workloads/_index.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: "Kubernetes Workloads and Pods" -description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" -weight: 7 ---- - -You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. - -### Pods - -[_Pods_](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) are one or more containers that share network namespaces and storage volumes. Most pods have only one container. Therefore when we discuss _pods_, the term is often synonymous with _containers_. You scale pods the same way you scale containers—by having multiple instances of the same pod that implement a service. Usually pods get scaled and managed by the workload. - -### Workloads - -_Workloads_ are objects that set deployment rules for pods. Based on these rules, Kubernetes performs the deployment and updates the workload with the current state of the application. -Workloads let you define the rules for application scheduling, scaling, and upgrade. - -#### Workload Types - -Kubernetes divides workloads into different types. The most popular types supported by Kubernetes are: - -- [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) - - _Deployments_ are best used for stateless applications (i.e., when you don't have to maintain the workload's state). Pods managed by deployment workloads are treated as independent and disposable. 
If a pod encounters disruption, Kubernetes removes it and then recreates it. An example application would be an Nginx web server. - -- [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) - - _StatefulSets_, in contrast to deployments, are best used when your application needs to maintain its identity and store data. An application would be something like Zookeeper—an application that requires a database for storage. - -- [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) - - _DaemonSets_ ensure that every node in the cluster runs a copy of the pod. For use cases where you're collecting logs or monitoring node performance, this daemon-like workload works best. - -- [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) - - _Jobs_ launch one or more pods and ensure that a specified number of them successfully terminate. Jobs are best used to run a finite task to completion as opposed to managing an ongoing desired application state. - -- [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) - - _CronJobs_ are similar to jobs. CronJobs, however, run to completion on a cron-based schedule. - -### Services - -In many use cases, a workload has to be either: - -- Accessed by other workloads in the cluster. -- Exposed to the outside world. - -You can achieve these goals by creating a _Service_. Services are mapped to the underlying workload's pods using a [selector/label approach (view the code samples)](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#service-and-replicationcontroller). Rancher UI simplifies this mapping process by automatically creating a service along with the workload, using the service port and type that you select. - -#### Service Types - -There are several types of services available in Rancher. 
The descriptions below are sourced from the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types). - -- **ClusterIP** - - >Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. This is the default `ServiceType`. - -- **NodePort** - - >Exposes the service on each Node’s IP at a static port (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will route, is automatically created. You’ll be able to contact the `NodePort` service, from outside the cluster, by requesting `:`. - -- **LoadBalancer** - - >Exposes the service externally using a cloud provider’s load balancer. `NodePort` and `ClusterIP` services, to which the external load balancer will route, are automatically created. - -## Workload Options - -This section of the documentation contains instructions for deploying workloads and using workload options. - -- [Deploy Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/) -- [Upgrade Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/) -- [Rollback Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/) - -## Related Links - -### External Links - -- [Services](https://kubernetes.io/docs/concepts/services-networking/service/) diff --git a/content/rancher/v2.5/en/cluster-explorer/workloads/add-a-sidecar/_index.md b/content/rancher/v2.5/en/cluster-explorer/workloads/add-a-sidecar/_index.md deleted file mode 100644 index 2b111ad691c..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/workloads/add-a-sidecar/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Adding a Sidecar -weight: 4 ---- -A _sidecar_ is a container that extends or enhances the main container in a pod. The main container and the sidecar share a pod, and therefore share the same network space and storage. 
You can add sidecars to existing workloads by using the **Add a Sidecar** option. - -1. From the **Global** view, open the project running the workload you want to add a sidecar to. - -1. Click **Resources > Workloads.** In versions prior to v2.3.0, select the **Workloads** tab. - -1. Find the workload that you want to extend. Select **⋮ icon (...) > Add a Sidecar**. - -1. Enter a **Name** for the sidecar. - -1. Select a **Sidecar Type**. This option determines if the sidecar container is deployed before or after the main container is deployed. - - - **Standard Container:** - - The sidecar container is deployed after the main container. - - - **Init Container:** - - The sidecar container is deployed before the main container. - -1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy in support of the main container. During deployment, Rancher pulls this image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears on Docker Hub. - -1. Set the remaining options. You can read about them in [Deploying Workloads](../deploy-workloads). - -1. Click **Launch**. - -**Result:** The sidecar is deployed according to your parameters. Following its deployment, you can view the sidecar by selecting **⋮ icon (...) > Edit** for the main deployment. - -## Related Links - -- [The Distributed System ToolKit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns/) diff --git a/content/rancher/v2.5/en/cluster-explorer/workloads/deploy-workloads/_index.md b/content/rancher/v2.5/en/cluster-explorer/workloads/deploy-workloads/_index.md deleted file mode 100644 index 5c0ff40810b..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/workloads/deploy-workloads/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Deploying Workloads -description: Read this step by step guide for deploying workloads. 
Deploy a workload to run an application in one or more containers. -weight: 1 ---- - -Deploy a workload to run an application in one or more containers. - -1. From the **Global** view, open the project that you want to deploy a workload to. - -1. Click **Resources > Workloads.** (In versions prior to v2.3.0, click the **Workloads** tab.) From the **Workloads** view, click **Deploy**. - -1. Enter a **Name** for the workload. - -1. Select a [workload type]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/). The workload defaults to a scalable deployment, but you can change the workload type by clicking **More options.** - -1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project, optionally prefacing it with the registry host (e.g. `quay.io`, `registry.gitlab.com`, etc.). During deployment, Rancher pulls this image from the specified public or private registry. If no registry host is provided, Rancher will pull the image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears in the registry server, including any required path, and optionally including the desired tag (e.g. `registry.gitlab.com/user/path/image:tag`). If no tag is provided, the `latest` tag will be automatically used. - -1. Either select an existing [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces), or click **Add to a new namespace** and enter a new namespace. - -1. Click **Add Port** to enter a port mapping, which enables access to the application inside and outside of the cluster. For more information, see [Services]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/#services). - -1. Configure the remaining options: - - - **Environment Variables** - - Use this section to either specify environment variables for your workload to consume on the fly, or to pull them from another source, such as a secret or [ConfigMap]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/). 
- - - **Node Scheduling** - - **Health Check** - - **Volumes** - - Use this section to add storage for your workload. You can manually specify the volume that you want to add, use a persistent volume claim to dynamically create a volume for the workload, or read data for a volume to use from a file such as a [ConfigMap]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/). - - When you are deploying a Stateful Set, you should use a Volume Claim Template when using Persistent Volumes. This will ensure that Persistent Volumes are created dynamically when you scale your Stateful Set. This option is available in the UI as of Rancher v2.2.0. - - - **Scaling/Upgrade Policy** - - >**Amazon Note for Volumes:** - > - > To mount an Amazon EBS volume: - > - >- In [Amazon AWS](https://aws.amazon.com/), the nodes must be in the same Availability Zone and possess IAM permissions to attach/unattach volumes. - > - >- The cluster must be using the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws) option. For more information on enabling this option see [Creating an Amazon EC2 Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) or [Creating a Custom Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes). - - -1. Click **Show Advanced Options** and configure: - - - **Command** - - **Networking** - - **Labels & Annotations** - - **Security and Host Config** - -1. Click **Launch**. - -**Result:** The workload is deployed to the chosen namespace. You can view the workload's status from the project's **Workloads** view. 
diff --git a/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/_index.md b/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/_index.md deleted file mode 100644 index 4a1ecdd03e4..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: The Horizontal Pod Autoscaler -description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment -weight: 5 ---- - -The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. - -Rancher provides some additional features to help manage HPAs, depending on the version of Rancher. - -You can create, manage, and delete HPAs using the Rancher UI in Rancher v2.3.0-alpha4 and higher versions. It only supports HPA in the `autoscaling/v2beta2` API. - -## Managing HPAs - -The way that you manage HPAs is different based on your version of the Kubernetes API: - -- **For Kubernetes API version autoscaling/V2beta1:** This version of the Kubernetes API lets you autoscale your pods based on the CPU and memory utilization of your application. -- **For Kubernetes API Version autoscaling/V2beta2:** This version of the Kubernetes API lets you autoscale your pods based on CPU and memory utilization, in addition to custom metrics. - -HPAs are also managed differently based on your version of Rancher: - -- **For Rancher v2.3.0+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). 
To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). -- **For Rancher Prior to v2.3.0:** To manage and configure HPAs, you need to use `kubectl`. For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl). - -You might have additional HPA installation steps if you are using an older version of Rancher: - -- **For Rancher v2.0.7+:** Clusters created in Rancher v2.0.7 and higher automatically have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use HPA. -- **For Rancher Prior to v2.0.7:** Clusters created in Rancher prior to v2.0.7 don't automatically have the requirements needed to use HPA. For instructions on installing HPA for these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). - -## Testing HPAs with a Service Deployment - -In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). - -You can also use `kubectl` to get the status of HPAs that you test with your load testing tool. For more information, refer to [Testing HPAs with kubectl] -({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/). 
diff --git a/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/hpa-background/_index.md b/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/hpa-background/_index.md deleted file mode 100644 index d1789809370..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/hpa-background/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Background Information on HPAs -weight: 1 ---- - -The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. This section provides explanation on how HPA works with Kubernetes. - -## Why Use Horizontal Pod Autoscaler? - -Using HPA, you can automatically scale the number of pods within a replication controller, deployment, or replica set up or down. HPA automatically scales the number of pods that are running for maximum efficiency. Factors that affect the number of pods include: - -- A minimum and maximum number of pods allowed to run, as defined by the user. -- Observed CPU/memory use, as reported in resource metrics. -- Custom metrics provided by third-party metrics application like Prometheus, Datadog, etc. - -HPA improves your services by: - -- Releasing hardware resources that would otherwise be wasted by an excessive number of pods. -- Increase/decrease performance as needed to accomplish service level agreements. - -## How HPA Works - -![HPA Schema]({{}}/img/rancher/horizontal-pod-autoscaler.jpg) - -HPA is implemented as a control loop, with a period controlled by the `kube-controller-manager` flags below: - -Flag | Default | Description | ----------|----------|----------| - `--horizontal-pod-autoscaler-sync-period` | `30s` | How often HPA audits resource/custom metrics in a deployment. 
- `--horizontal-pod-autoscaler-downscale-delay` | `5m0s` | Following completion of a downscale operation, how long HPA must wait before launching another downscale operations. - `--horizontal-pod-autoscaler-upscale-delay` | `3m0s` | Following completion of an upscale operation, how long HPA must wait before launching another upscale operation. - - -For full documentation on HPA, refer to the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). - -## Horizontal Pod Autoscaler API Objects - -HPA is an API resource in the Kubernetes `autoscaling` API group. The current stable version is `autoscaling/v1`, which only includes support for CPU autoscaling. To get additional support for scaling based on memory and custom metrics, use the beta version instead: `autoscaling/v2beta1`. - -For more information about the HPA API object, see the [HPA GitHub Readme](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object). \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md b/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md deleted file mode 100644 index f0df125ee88..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: Managing HPAs with kubectl -weight: 3 ---- - -This section describes HPA management with `kubectl`. 
This document has instructions for how to: - -- Create an HPA -- Get information on HPAs -- Delete an HPA -- Configure your HPAs to scale with CPU or memory utilization -- Configure your HPAs to scale using custom metrics, if you use a third-party tool such as Prometheus for metrics - -### Note For Rancher v2.3.x - -In Rancher v2.3.x, you can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). For scaling HPAs based on other metrics than CPU or memory, you still need `kubectl`. - -### Note For Rancher Prior to v2.0.7 - -Clusters created with older versions of Rancher don't automatically have all the requirements to create an HPA. To install an HPA on these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). - -##### Basic kubectl Command for Managing HPAs - -If you have an HPA manifest file, you can create, manage, and delete HPAs using `kubectl`: - -- Creating HPA - - - With manifest: `kubectl create -f ` - - - Without manifest (Just support CPU): `kubectl autoscale deployment hello-world --min=2 --max=5 --cpu-percent=50` - -- Getting HPA info - - - Basic: `kubectl get hpa hello-world` - - - Detailed description: `kubectl describe hpa hello-world` - -- Deleting HPA - - - `kubectl delete hpa hello-world` - -##### HPA Manifest Definition Example - -The HPA manifest is the config file used for managing an HPA with `kubectl`. - -The following snippet demonstrates use of different directives in an HPA manifest. See the list below the sample to understand the purpose of each directive. 
- -```yml -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 100Mi -``` - - -Directive | Description ---------|----------| - `apiVersion: autoscaling/v2beta1` | The version of the Kubernetes `autoscaling` API group in use. This example manifest uses the beta version, so scaling by CPU and memory is enabled. | - `name: hello-world` | Indicates that HPA is performing autoscaling for the `hello-world` deployment. | - `minReplicas: 1` | Indicates that the minimum number of replicas running can't go below 1. | - `maxReplicas: 10` | Indicates the maximum number of replicas in the deployment can't go above 10. - `targetAverageUtilization: 50` | Indicates the deployment will scale pods up when the average running pod uses more than 50% of its requested CPU. - `targetAverageValue: 100Mi` | Indicates the deployment will scale pods up when the average running pod uses more than 100Mi of memory. -
- -##### Configuring HPA to Scale Using Resource Metrics (CPU and Memory) - -Clusters created in Rancher v2.0.7 and higher have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use Horizontal Pod Autoscaler. Run the following commands to check if metrics are available in your installation: - -``` -$ kubectl top nodes -NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% -node-controlplane 196m 9% 1623Mi 42% -node-etcd 80m 4% 1090Mi 28% -node-worker 64m 3% 1146Mi 29% -$ kubectl -n kube-system top pods -NAME CPU(cores) MEMORY(bytes) -canal-pgldr 18m 46Mi -canal-vhkgr 20m 45Mi -canal-x5q5v 17m 37Mi -canal-xknnz 20m 37Mi -kube-dns-7588d5b5f5-298j2 0m 22Mi -kube-dns-autoscaler-5db9bbb766-t24hw 0m 5Mi -metrics-server-97bc649d5-jxrlt 0m 12Mi -$ kubectl -n kube-system logs -l k8s-app=metrics-server -I1002 12:55:32.172841 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true -I1002 12:55:32.172994 1 heapster.go:72] Metrics Server version v0.2.1 -I1002 12:55:32.173378 1 configs.go:61] Using Kubernetes client with master "https://kubernetes.default.svc" and version -I1002 12:55:32.173401 1 configs.go:62] Using kubelet port 10250 -I1002 12:55:32.173946 1 heapster.go:128] Starting with Metric Sink -I1002 12:55:32.592703 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) -I1002 12:55:32.925630 1 heapster.go:101] Starting Heapster API server... 
-[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] listing is available at https:///swaggerapi -[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ -I1002 12:55:32.928597 1 serve.go:85] Serving securely on 0.0.0.0:443 -``` - -If you have created your cluster in Rancher v2.0.6 or before, please refer to [Manual installation](#manual-installation) - -##### Configuring HPA to Scale Using Custom Metrics with Prometheus - -You can configure HPA to autoscale based on custom metrics provided by third-party software. The most common use case for autoscaling using third-party software is based on application-level metrics (i.e., HTTP requests per second). HPA uses the `custom.metrics.k8s.io` API to consume these metrics. This API is enabled by deploying a custom metrics adapter for the metrics collection solution. - -For this example, we are going to use [Prometheus](https://prometheus.io/). We are beginning with the following assumptions: - -- Prometheus is deployed in the cluster. -- Prometheus is configured correctly and collecting proper metrics from pods, nodes, namespaces, etc. -- Prometheus is exposed at the following URL and port: `http://prometheus.mycompany.io:80` - -Prometheus is available for deployment in the Rancher v2.0 catalog. Deploy it from Rancher catalog if it isn't already running in your cluster. - -For HPA to use custom metrics from Prometheus, package [k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) is required in the `kube-system` namespace of your cluster. To install `k8s-prometheus-adapter`, we are using the Helm chart available at [banzai-charts](https://github.com/banzaicloud/banzai-charts). - -1. Initialize Helm in your cluster. - ``` - # kubectl -n kube-system create serviceaccount tiller - kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller - helm init --service-account tiller - ``` - -1. 
Clone the `banzai-charts` repo from GitHub: - ``` - # git clone https://github.com/banzaicloud/banzai-charts - ``` - -1. Install the `prometheus-adapter` chart, specifying the Prometheus URL and port number. - ``` - # helm install --name prometheus-adapter banzai-charts/prometheus-adapter --set prometheus.url="http://prometheus.mycompany.io",prometheus.port="80" --namespace kube-system - ``` - -1. Check that `prometheus-adapter` is running properly. Check the service pod and logs in the `kube-system` namespace. - - 1. Check that the service pod is `Running`. Enter the following command. - ``` - # kubectl get pods -n kube-system - ``` - From the resulting output, look for a status of `Running`. - ``` - NAME READY STATUS RESTARTS AGE - ... - prometheus-adapter-prometheus-adapter-568674d97f-hbzfx 1/1 Running 0 7h - ... - ``` - 1. Check the service logs to make sure the service is running correctly by entering the command that follows. - ``` - # kubectl logs prometheus-adapter-prometheus-adapter-568674d97f-hbzfx -n kube-system - ``` - Then review the log output to confirm the service is running. - {{% accordion id="prometheus-logs" label="Prometheus Adaptor Logs" %}} - ... 
- I0724 10:18:45.696679 1 round_trippers.go:436] GET https://10.43.0.1:443/api/v1/namespaces/default/pods?labelSelector=app%3Dhello-world 200 OK in 2 milliseconds - I0724 10:18:45.696695 1 round_trippers.go:442] Response Headers: - I0724 10:18:45.696699 1 round_trippers.go:445] Date: Tue, 24 Jul 2018 10:18:45 GMT - I0724 10:18:45.696703 1 round_trippers.go:445] Content-Type: application/json - I0724 10:18:45.696706 1 round_trippers.go:445] Content-Length: 2581 - I0724 10:18:45.696766 1 request.go:836] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/default/pods","resourceVersion":"6237"},"items":[{"metadata":{"name":"hello-world-54764dfbf8-q6l82","generateName":"hello-world-54764dfbf8-","namespace":"default","selfLink":"/api/v1/namespaces/default/pods/hello-world-54764dfbf8-q6l82","uid":"484cb929-8f29-11e8-99d2-067cac34e79c","resourceVersion":"4066","creationTimestamp":"2018-07-24T10:06:50Z","labels":{"app":"hello-world","pod-template-hash":"1032089694"},"annotations":{"cni.projectcalico.org/podIP":"10.42.0.7/32"},"ownerReferences":[{"apiVersion":"extensions/v1beta1","kind":"ReplicaSet","name":"hello-world-54764dfbf8","uid":"4849b9b1-8f29-11e8-99d2-067cac34e79c","controller":true,"blockOwnerDeletion":true}]},"spec":{"volumes":[{"name":"default-token-ncvts","secret":{"secretName":"default-token-ncvts","defaultMode":420}}],"containers":[{"name":"hello-world","image":"rancher/hello-world","ports":[{"containerPort":80,"protocol":"TCP"}],"resources":{"requests":{"cpu":"500m","memory":"64Mi"}},"volumeMounts":[{"name":"default-token-ncvts","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"34.220.18.140","securityContext":{},"schedulerNam
e":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":300},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":300}]},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:54Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"}],"hostIP":"34.220.18.140","podIP":"10.42.0.7","startTime":"2018-07-24T10:06:50Z","containerStatuses":[{"name":"hello-world","state":{"running":{"startedAt":"2018-07-24T10:06:54Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"rancher/hello-world:latest","imageID":"docker-pullable://rancher/hello-world@sha256:4b1559cb4b57ca36fa2b313a3c7dde774801aa3a2047930d94e11a45168bc053","containerID":"docker://cce4df5fc0408f03d4adf82c90de222f64c302bf7a04be1c82d584ec31530773"}],"qosClass":"Burstable"}}]} - I0724 10:18:45.699525 1 api.go:74] GET http://prometheus-server.prometheus.34.220.18.140.xip.io/api/v1/query?query=sum%28rate%28container_fs_read_seconds_total%7Bpod_name%3D%22hello-world-54764dfbf8-q6l82%22%2Ccontainer_name%21%3D%22POD%22%2Cnamespace%3D%22default%22%7D%5B5m%5D%29%29+by+%28pod_name%29&time=1532427525.697 200 OK - I0724 10:18:45.699620 1 api.go:93] Response Body: {"status":"success","data":{"resultType":"vector","result":[{"metric":{"pod_name":"hello-world-54764dfbf8-q6l82"},"value":[1532427525.697,"0"]}]}} - I0724 10:18:45.699939 1 wrap.go:42] GET /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/%2A/fs_read?labelSelector=app%3Dhello-world: (12.431262ms) 200 [[kube-controller-manager/v1.10.1 (linux/amd64) kubernetes/d4ab475/system:serviceaccount:kube-system:horizontal-pod-autoscaler] 10.42.0.0:24268] - I0724 10:18:51.727845 1 request.go:836] Request Body: 
{"kind":"SubjectAccessReview","apiVersion":"authorization.k8s.io/v1beta1","metadata":{"creationTimestamp":null},"spec":{"nonResourceAttributes":{"path":"/","verb":"get"},"user":"system:anonymous","group":["system:unauthenticated"]},"status":{"allowed":false}} - ... - {{% /accordion %}} - - - -1. Check that the metrics API is accessible from kubectl. - - - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. - ``` - # kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 - ``` - If the API is accessible, you should receive output that's similar to what follows. - {{% accordion id="custom-metrics-api-response" label="API Response" %}} - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pod
s/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","sing
ularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} - {{% /accordion %}} - - - If you are accessing the cluster through Rancher, enter your Server URL in the kubectl config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path. - ``` - # kubectl get --raw /k8s/clusters//apis/custom.metrics.k8s.io/v1beta1 - ``` - If the API is accessible, you should receive output that's similar to what follows. 
- {{% accordion id="custom-metrics-api-response-rancher" label="API Response" %}} - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]
},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usag
e","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} - {{% /accordion %}} \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md b/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md deleted file mode 100644 index 1c7d8ed3aad..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Managing HPAs with the Rancher UI -weight: 2 ---- - -The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. - -If you want to create HPAs that scale based on other metrics than CPU and memory, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). - -## Creating an HPA - -1. 
From the **Global** view, open the project that you want to deploy a HPA to. - -1. Click **Resources > HPA.** - -1. Click **Add HPA.** - -1. Enter a **Name** for the HPA. - -1. Select a **Namespace** for the HPA. - -1. Select a **Deployment** as scale target for the HPA. - -1. Specify the **Minimum Scale** and **Maximum Scale** for the HPA. - -1. Configure the metrics for the HPA. You can choose memory or CPU usage as the metric that will cause the HPA to scale the service up or down. In the **Quantity** field, enter the percentage of the workload's memory or CPU usage that will cause the HPA to scale the service. To configure other HPA metrics, including metrics available from Prometheus, you need to [manage HPAs using kubectl]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). - -1. Click **Create** to create the HPA. - -> **Result:** The HPA is deployed to the chosen namespace. You can view the HPA's status from the project's Resources > HPA view. - -## Get HPA Metrics and Status - -1. From the **Global** view, open the project with the HPAs you want to look at. - -1. Click **Resources > HPA.** The **HPA** tab shows the number of current replicas. - -1. For more detailed metrics and status of a specific HPA, click the name of the HPA. This leads to the HPA detail page. - - -## Deleting an HPA - -1. From the **Global** view, open the project that you want to delete an HPA from. - -1. Click **Resources > HPA.** - -1. Find the HPA which you would like to delete. - -1. Click **⋮ > Delete**. - -1. Click **Delete** to confirm. - -> **Result:** The HPA is deleted from the current cluster. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/testing-hpa/_index.md b/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/testing-hpa/_index.md deleted file mode 100644 index 09e689fcfa1..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/workloads/horitzontal-pod-autoscaler/testing-hpa/_index.md +++ /dev/null @@ -1,491 +0,0 @@ ---- -title: Testing HPAs with kubectl -weight: 4 ---- - -This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (at least version 2.3.x), refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/). - -For HPA to work correctly, service deployments should have resources request definitions for containers. Follow this hello-world example to test if HPA is working correctly. - -1. Configure `kubectl` to connect to your Kubernetes cluster. - -2. Copy the `hello-world` deployment manifest below. -{{% accordion id="hello-world" label="Hello World Manifest" %}} -``` -apiVersion: apps/v1beta2 -kind: Deployment -metadata: - labels: - app: hello-world - name: hello-world - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - app: hello-world - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: rancher/hello-world - imagePullPolicy: Always - name: hello-world - resources: - requests: - cpu: 500m - memory: 64Mi - ports: - - containerPort: 80 - protocol: TCP - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - name: hello-world - namespace: default -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 80 - selector: - app: hello-world -``` -{{% /accordion %}} - -1. Deploy it to your cluster. 
- - ``` - # kubectl create -f - ``` - -1. Copy one of the HPAs below based on the metric type you're using: -{{% accordion id="service-deployment-resource-metrics" label="Hello World HPA: Resource Metrics" %}} -``` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world - namespace: default -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 1000Mi -``` -{{% /accordion %}} -{{% accordion id="service-deployment-custom-metrics" label="Hello World HPA: Custom Metrics" %}} -``` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world - namespace: default -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 100Mi - - type: Pods - pods: - metricName: cpu_system - targetAverageValue: 20m -``` -{{% /accordion %}} - -1. View the HPA info and description. Confirm that metric data is shown. - {{% accordion id="hpa-info-resource-metrics" label="Resource Metrics" %}} -1. Enter the following commands. 
- ``` - # kubectl get hpa - NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE - hello-world Deployment/hello-world 1253376 / 100Mi, 0% / 50% 1 10 1 6m - # kubectl describe hpa - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 20:21:16 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 1253376 / 100Mi - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - ``` - {{% /accordion %}} - {{% accordion id="hpa-info-custom-metrics" label="Custom Metrics" %}} -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive the output that follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:36:28 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 3514368 / 100Mi - "cpu_system" on pods: 0 / 20m - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - ``` - {{% /accordion %}} - - -1. Generate a load for the service to test that your pods autoscale as intended. 
You can use any load-testing tool (Hey, Gatling, etc.), but we're using [Hey](https://github.com/rakyll/hey). - -1. Test that pod autoscaling works as intended.

- **To Test Autoscaling Using Resource Metrics:** - {{% accordion id="observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to scale up to two pods based on CPU Usage. - -1. View your HPA. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 10928128 / 100Mi - resource cpu on pods (as a percentage of request): 56% (280m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm you've scaled to two pods. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows: - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-k8ph2 1/1 Running 0 1m - hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h - ``` - {{% /accordion %}} - {{% accordion id="observe-upscale-3-pods-cpu-cooldown" label="Upscale to 3 pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale to 3 pods based on CPU usage with `horizontal-pod-autoscaler-upscale-delay` set to 3 minutes. - -1. Enter the following command. 
- ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 9424896 / 100Mi - resource cpu on pods (as a percentage of request): 66% (333m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 4m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target - ``` -2. Enter the following command to confirm three pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-f46kh 0/1 Running 0 1m - hello-world-54764dfbf8-k8ph2 1/1 Running 0 5m - hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h - ``` - {{% /accordion %}} - {{% accordion id="observe-downscale-1-pod" label="Downscale to 1 Pod: All Metrics Below Target" %}} -Use your load testing to scale down to 1 pod when all metrics are below target for `horizontal-pod-autoscaler-downscale-delay` (5 minutes by default). - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. 
- ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 10070016 / 100Mi - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 6m horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 1s horizontal-pod-autoscaler New size: 1; reason: All metrics below target - ``` - {{% /accordion %}} -
-**To Test Autoscaling Using Custom Metrics:** - {{% accordion id="custom-observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale two pods based on CPU usage. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8159232 / 100Mi - "cpu_system" on pods: 7m / 20m - resource cpu on pods (as a percentage of request): 64% (321m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm two pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-5pfdr 1/1 Running 0 3s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` - {{% /accordion %}} -{{% accordion id="observe-upscale-3-pods-cpu-cooldown-2" label="Upscale to 3 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to scale up to three pods when the cpu_system usage limit is up to target. - -1. Enter the following command. 
- ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows: - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8374272 / 100Mi - "cpu_system" on pods: 27m / 20m - resource cpu on pods (as a percentage of request): 71% (357m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 3s horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - ``` -1. Enter the following command to confirm three pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows: - ``` - # kubectl get pods - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-5pfdr 1/1 Running 0 3m - hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} -{{% accordion id="observe-upscale-4-pods" label="Upscale to 4 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale to four pods based on CPU usage. `horizontal-pod-autoscaler-upscale-delay` is set to three minutes by default. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. 
- ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8374272 / 100Mi - "cpu_system" on pods: 27m / 20m - resource cpu on pods (as a percentage of request): 71% (357m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - Normal SuccessfulRescale 4s horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm four pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-2p9xb 1/1 Running 0 5m - hello-world-54764dfbf8-5pfdr 1/1 Running 0 2m - hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} -{{% accordion id="custom-metrics-observe-downscale-1-pod" label="Downscale to 1 Pod: All Metrics Below Target" %}} -Use your load testing tool to scale down to one pod when all metrics below target for `horizontal-pod-autoscaler-downscale-delay`. - -1. Enter the following command. 
- ``` - # kubectl describe hpa - ``` - You should receive similar output to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8101888 / 100Mi - "cpu_system" on pods: 8m / 20m - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 8m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 1; reason: All metrics below target - ``` -1. Enter the following command to confirm a single pods is running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. 
- ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-explorer/workloads/rollback-workloads/_index.md b/content/rancher/v2.5/en/cluster-explorer/workloads/rollback-workloads/_index.md deleted file mode 100644 index a6701d94bd1..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/workloads/rollback-workloads/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Rolling Back Workloads -weight: 3 ---- - -Sometimes there is a need to rollback to the previous version of the application, either for debugging purposes or because an upgrade did not go as planned. - -1. From the **Global** view, open the project running the workload you want to rollback. - -1. Find the workload that you want to rollback and select **Vertical ⋮ (... ) > Rollback**. - -1. Choose the revision that you want to roll back to. Click **Rollback**. - -**Result:** Your workload reverts to the previous version that you chose. Wait a few minutes for the action to complete. diff --git a/content/rancher/v2.5/en/cluster-explorer/workloads/upgrade-workloads/_index.md b/content/rancher/v2.5/en/cluster-explorer/workloads/upgrade-workloads/_index.md deleted file mode 100644 index 521e8d2901b..00000000000 --- a/content/rancher/v2.5/en/cluster-explorer/workloads/upgrade-workloads/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Upgrading Workloads -weight: 2 ---- -When a new version of an application image is released on Docker Hub, you can upgrade any workloads running a previous version of the application to the new one. - -1. From the **Global** view, open the project running the workload you want to upgrade. - -1. Find the workload that you want to upgrade and select **Vertical ⋮ (... ) > Edit**. - -1. Update the **Docker Image** to the updated version of the application image on Docker Hub. - -1. Update any other options that you want to change. - -1. 
Review and edit the workload's **Scaling/Upgrade** policy. - - These options control how the upgrade rolls out to containers that are currently running. For example, for scalable deployments, you can chose whether you want to stop old pods before deploying new ones, or vice versa, as well as the upgrade batch size. - -1. Click **Upgrade**. - -**Result:** The workload begins upgrading its containers, per your specifications. Note that scaling up the deployment or updating the upgrade/scaling policy won't result in the pods recreation. diff --git a/content/rancher/v2.5/en/contributing/_index.md b/content/rancher/v2.5/en/contributing/_index.md deleted file mode 100644 index 69c09d3974a..00000000000 --- a/content/rancher/v2.5/en/contributing/_index.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: Contributing to Rancher -weight: 22 ---- - -This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. - -For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: - -- How to set up the Rancher development environment and run tests -- The typical flow of an issue through the development lifecycle -- Coding guidelines and development best practices -- Debugging and troubleshooting -- Developing the Rancher API - -On the Rancher Users Slack, the channel for developers is **#developer**. - -# Repositories - -All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. - -Repository | URL | Description ------------|-----|------------- -Rancher | https://github.com/rancher/rancher | This repository is the main source code for Rancher 2.x. 
-Types | https://github.com/rancher/types | This repository is the repository that has all the API types for Rancher 2.x. -API Framework | https://github.com/rancher/norman | This repository is an API framework for building Rancher style APIs backed by Kubernetes Custom Resources. -User Interface | https://github.com/rancher/ui | This repository is the source of the UI. -(Rancher) Docker Machine | https://github.com/rancher/machine | This repository is the source of the Docker Machine binary used when using Node Drivers. This is a fork of the `docker/machine` repository. -machine-package | https://github.com/rancher/machine-package | This repository is used to build the Rancher Docker Machine binary. -kontainer-engine | https://github.com/rancher/kontainer-engine | This repository is the source of kontainer-engine, the tool to provision hosted Kubernetes clusters. -RKE repository | https://github.com/rancher/rke | This repository is the source of Rancher Kubernetes Engine, the tool to provision Kubernetes clusters on any machine. -CLI | https://github.com/rancher/cli | This repository is the source code for the Rancher CLI used in Rancher 2.x. -(Rancher) Helm repository | https://github.com/rancher/helm | This repository is the source of the packaged Helm binary. This is a fork of the `helm/helm` repository. -Telemetry repository | https://github.com/rancher/telemetry | This repository is the source for the Telemetry binary. -loglevel repository | https://github.com/rancher/loglevel | This repository is the source of the loglevel binary, used to dynamically change log levels. - -To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. - -![Rancher diagram]({{}}/img/rancher/ranchercomponentsdiagram.svg)
-Rancher components used for provisioning/managing Kubernetes clusters. - -# Building - -Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. - -The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. - -# Bugs, Issues or Questions - -If you find any bugs or are having any trouble, please search the [reported issue](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. - -If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about an use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). - -### Checklist for Filing Issues - -Please follow this checklist when filing an issue which will helps us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to the issue. - ->**Note:** For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. ->**Important:** Please remove any sensitive data as it will be publicly viewable. - -- **Resources:** Provide as much as detail as possible on the used resources. 
As the source of the issue can be many things, including as much of detail as possible helps to determine the root cause. See some examples below: - - **Hosts:** What specifications does the host have, like CPU/memory/disk, what cloud does it happen on, what Amazon Machine Image are you using, what DigitalOcean droplet are you using, what image are you provisioning that we can rebuild or use when we try to reproduce - - **Operating System:** What operating system are you using? Providing specifics helps here like the output of `cat /etc/os-release` for exact OS release and `uname -r` for exact kernel used - - **Docker:** What Docker version are you using, how did you install it? Most of the details of Docker can be found by supplying output of `docker version` and `docker info` - - **Environment:** Are you in a proxy environment, are you using recognized CA/self signed certificates, are you using an external loadbalancer - - **Rancher:** What version of Rancher are you using, this can be found on the bottom left of the UI or be retrieved from the image tag you are running on the host - - **Clusters:** What kind of cluster did you create, how did you create it, what did you specify when you were creating it -- **Steps to reproduce the issue:** Provide as much detail on how you got into the reported situation. This helps the person to reproduce the situation you are in. - - Provide manual steps or automation scripts used to get from a newly created setup to the situation you reported. -- **Logs:** Provide data/logs from the used resources. - - Rancher - - Docker install - - ``` - docker logs \ - --timestamps \ - $(docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }') - ``` - - Kubernetes install using `kubectl` - - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. 
- - ``` - kubectl -n cattle-system \ - logs \ - -l app=rancher \ - --timestamps=true - ``` - - Docker install using `docker` on each of the nodes in the RKE cluster - - ``` - docker logs \ - --timestamps \ - $(docker ps | grep -E "rancher/rancher@|rancher_rancher" | awk '{ print $1 }') - ``` - - Kubernetes Install with RKE Add-On - - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. - - ``` - kubectl -n cattle-system \ - logs \ - --timestamps=true \ - -f $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name="cattle-server") | .metadata.name') - ``` - - System logging (these might not all exist, depending on operating system) - - `/var/log/messages` - - `/var/log/syslog` - - `/var/log/kern.log` - - Docker daemon logging (these might not all exist, depending on operating system) - - `/var/log/docker.log` -- **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. - -# Docs - -If you have any updates to our documentation, please make any pull request to our docs repo. - -- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are located in the `content` folder in the repo. - -- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are located in the `rancher` folder in the repo. 
diff --git a/content/rancher/v2.5/en/ecm/_index.md b/content/rancher/v2.5/en/ecm/_index.md deleted file mode 100644 index 0e359ce5891..00000000000 --- a/content/rancher/v2.5/en/ecm/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Enterprise Cluster Manager -weight: 6 ---- - -After installation, the [system administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. - -## First Log In - -After you log into Rancher for the first time, Rancher will prompt you for a **Rancher Server URL**.You should set the URL to the main entry point to the Rancher Server. When a load balancer sits in front a Rancher Server cluster, the URL should resolve to the load balancer. The system will automatically try to infer the Rancher Server URL from the IP address or host name of the host running the Rancher Server. This is only correct if you are running a single node Rancher Server installation. In most cases, therefore, you need to set the Rancher Server URL to the correct value yourself. - ->**Important!** After you set the Rancher Server URL, we do not support updating it. Set the URL with extreme care. - -## Authentication - -One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows to set up local users and/or connect to an external authentication provider. By connecting to an external authentication provider, you can leverage that provider's user and groups. - -For more information how authentication works and how to configure each provider, see [Authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/). - -## Authorization - -Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. 
Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by the user's role. Rancher provides built-in roles to allow you to easily configure a user's permissions to resources, but Rancher also provides the ability to customize the roles for each Kubernetes resource. - -For more information how authorization works and how to customize roles, see [Roles Based Access Control (RBAC)]({{}}/rancher/v2.x/en/admin-settings/rbac/). - -## Pod Security Policies - -_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification, e.g. root privileges. If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message. - -For more information how to create and use PSPs, see [Pod Security Policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/). - -## Provisioning Drivers - -Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. - -For more information, see [Provisioning Drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/). - -## Adding Kubernetes Versions into Rancher - -With this feature, you can upgrade to the latest version of Kubernetes as soon as it is released, without upgrading Rancher. This feature allows you to easily upgrade Kubernetes patch versions (i.e. `v1.15.X`), but not intended to upgrade Kubernetes minor versions (i.e. `v1.X.0`) as Kubernetes tends to deprecate or add APIs between minor versions. - -The information that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) is now located in the Rancher Kubernetes Metadata. 
For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata/) - -Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata/). - -## Enabling Experimental Features - -Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. For more information, refer to the section about [feature flags.]({{}}/rancher/v2.x/en/admin-settings/feature-flags/) diff --git a/content/rancher/v2.5/en/ecm/access-control/_index.md b/content/rancher/v2.5/en/ecm/access-control/_index.md deleted file mode 100644 index 199a0368e3f..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Access Control -weight: 1 ---- - -> This section is under construction. - -There are many ways you can interact with Kubernetes clusters that are managed by Rancher: - -- **Rancher UI** - - Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API. Therefore any action possible in the UI is also possible in the Rancher CLI or Rancher API. - -- **kubectl** - - You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: - - - **Rancher kubectl shell** - - Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. 
- - For more information, see [Accessing Clusters with kubectl Shell]({{}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-shell). - - - **Terminal remote connection** - - You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. - - For more information, see [Accessing Clusters with kubectl and a kubeconfig File]({{}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-and-a-kubeconfig-file). - -- **Rancher CLI** - - You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI]({{}}/rancher/v2.x/en/cli/). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. - -- **Rancher API** - - Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key]({{}}/rancher/v2.x/en/user-settings/api-keys/). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/_index.md deleted file mode 100644 index d8765f574e4..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/_index.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: Authentication Providers -weight: 1 ---- - -> This section is under construction. - -One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. 
- -This centralized user authentication is accomplished using the Rancher authentication proxy, which is installed along with the rest of Rancher. This proxy authenticates your users and forwards their requests to your Kubernetes clusters using a service account. - - - -## External vs. Local Authentication - -The Rancher authentication proxy integrates with the following external authentication services. The following table lists the first version of Rancher each service debuted. - -| Auth Service | Available as of | -| ------------------------------------------------------------------------------------------------ | ---------------- | -| [Microsoft Active Directory]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/) | v2.0.0 | -| [GitHub]({{}}/rancher/v2.x/en/admin-settings/authentication/github/) | v2.0.0 | -| [Microsoft Azure AD]({{}}/rancher/v2.x/en/admin-settings/authentication/azure-ad/) | v2.0.3 | -| [FreeIPA]({{}}/rancher/v2.x/en/admin-settings/authentication/freeipa/) | v2.0.5 | -| [OpenLDAP]({{}}/rancher/v2.x/en/admin-settings/authentication/openldap/) | v2.0.5 | -| [Microsoft AD FS]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/) | v2.0.7 | -| [PingIdentity]({{}}/rancher/v2.x/en/admin-settings/authentication/ping-federate/) | v2.0.7 | -| [Keycloak]({{}}/rancher/v2.x/en/admin-settings/authentication/keycloak/) | v2.1.0 | -| [Okta]({{}}/rancher/v2.x/en/admin-settings/authentication/okta/) | v2.2.0 | -| [Google OAuth]({{}}/rancher/v2.x/en/admin-settings/authentication/google/) | v2.3.0 | -| [Shibboleth]({{}}/rancher/v2.x/en/admin-settings/authentication/shibboleth) | v2.4.0 | - -
-However, Rancher also provides [local authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/local/). - -In most cases, you should use an external authentication service over local authentication, as external authentication allows user management from a central location. However, you may want a few local authentication users for managing Rancher under rare circumstances, such as if your external authentication provider is unavailable or undergoing maintenance. - -## Users and Groups - -Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When authenticating with an external provider, groups are provided from the external provider based on the user. These users and groups are given specific roles to resources like clusters, projects, multi-cluster apps, and global DNS providers and entries. When you give access to a group, all users who are a member of that group in the authentication provider will be able to access the resource with the permissions that you've specified. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.x/en/admin-settings/rbac/). - -> **Note:** Local authentication does not support creating or managing groups. - -For more information, see [Users and Groups]({{}}/rancher/v2.x/en/admin-settings/authentication/user-groups/) - -## Scope of Rancher Authorization - -After you configure Rancher to allow sign on using an external authentication service, you should configure who should be allowed to log in and use Rancher. The following options are available: - -| Access Level | Description | -|----------------------------------------------|-------------| -| Allow any valid Users | _Any_ user in the authorization service can access Rancher. We generally discourage use of this setting! 
| -| Allow members of Clusters, Projects, plus Authorized Users and Organizations | Any user in the authorization service and any group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any user in the authentication service or group you add to the **Authorized Users and Organizations** list may log in to Rancher. | -| Restrict access to only Authorized Users and Organizations | Only users in the authentication service or groups added to the Authorized Users and Organizations can log in to Rancher. | - -To set the Rancher access level for users in the authorization service, follow these steps: - -1. From the **Global** view, click **Security > Authentication.** - -1. Use the **Site Access** options to configure the scope of user authorization. The table above explains the access level for each option. - -1. Optional: If you choose an option other than **Allow any valid Users,** you can add users to the list of authorized users and organizations by searching for them in the text field that appears. - -1. Click **Save.** - -**Result:** The Rancher access configuration settings are applied. - -{{< saml_caveats >}} - -## External Authentication Configuration and Principal Users - -Configuration of external authentication requires: - -- A local user assigned the administrator role, called hereafter the _local principal_. -- An external user that can authenticate with your external authentication service, called hereafter the _external principal_. - -Configuration of external authentication affects how principal users are managed within Rancher. Follow the list below to better understand these effects. - -1. Sign into Rancher as the local principal and complete configuration of external authentication. - - ![Sign In]({{}}/img/rancher/sign-in.png) - -2. Rancher associates the external principal with the local principal. These two users share the local principal's user ID. 
- - ![Principal ID Sharing]({{}}/img/rancher/principal-ID.png) - -3. After you complete configuration, Rancher automatically signs out the local principal. - - ![Sign Out Local Principal]({{}}/img/rancher/sign-out-local.png) - -4. Then, Rancher automatically signs you back in as the external principal. - - ![Sign In External Principal]({{}}/img/rancher/sign-in-external.png) - -5. Because the external principal and the local principal share an ID, no unique object for the external principal displays on the Users page. - - ![Sign In External Principal]({{}}/img/rancher/users-page.png) - -6. The external principal and the local principal share the same access rights. diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/ad/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/ad/_index.md deleted file mode 100644 index 757fe51cc35..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/ad/_index.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: Configuring Active Directory (AD) -weight: 2 ---- - -If your organization uses Microsoft Active Directory as central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. - -Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/openldap) integration. - -> **Note:** -> -> Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). 
- -## Prerequisites - -You'll need to create or obtain from your AD administrator a new AD user to use as service account for Rancher. This user must have sufficient permissions to perform LDAP searches and read attributes of users and groups under your AD domain. - -Usually a (non-admin) **Domain User** account should be used for this purpose, as by default such user has read-only privileges for most objects in the domain partition. - -Note however, that in some locked-down Active Directory configurations this default behaviour may not apply. In such case you will need to ensure that the service account user has at least **Read** and **List Content** permissions granted either on the Base OU (enclosing users and groups) or globally for the domain. - -> **Using TLS?** -> -> If the certificate used by the AD server is self-signed or not from a recognised certificate authority, make sure you have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -## Configuration Steps -### Open Active Directory Configuration - -1. Log into the Rancher UI using the initial local `admin` account. -2. From the **Global** view, navigate to **Security** > **Authentication** -3. Select **Active Directory**. The **Configure an AD server** form will be displayed. - -### Configure Active Directory Server Settings - -In the section titled `1. Configure an Active Directory server`, complete the fields with the information specific to your Active Directory server. Please refer to the following table for detailed information on the required values for each parameter. - -> **Note:** -> -> If you are unsure about the correct values to enter in the user/group Search Base field, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch). 
- -**Table 1: AD Server parameters** - -| Parameter | Description | -|:--|:--| -| Hostname | Specify the hostname or IP address of the AD server | -| Port | Specify the port at which the Active Directory server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| -| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS).| -| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the AD server unreachable. | -| Service Account Username | Enter the username of an AD account with read-only access to your domain partition (see [Prerequisites](#prerequisites)). The username can be entered in NetBIOS format (e.g. "DOMAIN\serviceaccount") or UPN format (e.g. "serviceaccount@domain.com"). | -| Service Account Password | The password for the service account. | -| Default Login Domain | When you configure this field with the NetBIOS name of your AD domain, usernames entered without a domain (e.g. "jdoe") will automatically be converted to a slashed, NetBIOS logon (e.g. "LOGIN_DOMAIN\jdoe") when binding to the AD server. If your users authenticate with the UPN (e.g. "jdoe@acme.com") as username then this field **must** be left empty. | -| User Search Base | The Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| -| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave it empty. For example: "ou=groups,dc=acme,dc=com".| - ---- - -### Configure User/Group Schema - -In the section titled `2. Customize Schema` you must provide Rancher with a correct mapping of user and group attributes corresponding to the schema used in your directory. 
- -Rancher uses LDAP queries to search for and retrieve information about users and groups within the Active Directory. The attribute mappings configured in this section are used to construct search filters and resolve group membership. It is therefore paramount that the provided settings reflect the reality of your AD domain. - -> **Note:** -> -> If you are unfamiliar with the schema used in your Active Directory domain, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch) to determine the correct configuration values. - -#### User Schema - -The table below details the parameters for the user schema section configuration. - -**Table 2: User schema configuration parameters** - -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Username Attribute | The user attribute whose value is suitable as a display name. | -| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. If your users authenticate with their UPN (e.g. "jdoe@acme.com") as username then this field must normally be set to `userPrincipalName`. Otherwise for the old, NetBIOS-style logon names (e.g. "jdoe") it's usually `sAMAccountName`. | -| User Member Attribute | The attribute containing the groups that a user is a member of. | -| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the AD server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. To match UPN usernames (e.g. jdoe@acme.com) you should usually set the value of this field to `userPrincipalName`. 
| -| Search Filter | This filter gets applied to the list of users that is searched when Rancher attempts to add users to a site access list or tries to add members to clusters or projects. For example, a user search filter could be (|(memberOf=CN=group1,CN=Users,DC=testad,DC=rancher,DC=io)(memberOf=CN=group2,CN=Users,DC=testad,DC=rancher,DC=io)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of users will be empty. | -| User Enabled Attribute | The attribute containing an integer value representing a bitwise enumeration of user account flags. Rancher uses this to determine if a user account is disabled. You should normally leave this set to the AD standard `userAccountControl`. | -| Disabled Status Bitmask | This is the value of the `User Enabled Attribute` designating a disabled user account. You should normally leave this set to the default value of "2" as specified in the Microsoft Active Directory schema (see [here](https://docs.microsoft.com/en-us/windows/desktop/adschema/a-useraccountcontrol#remarks)). | - ---- - -#### Group Schema - -The table below details the parameters for the group schema configuration. - -**Table 3: Group schema configuration parameters** - -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for group objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Name Attribute | The group attribute whose value is suitable for a display name. | -| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | -| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | -| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects. 
See description of user schema `Search Attribute`. | -| Search Filter | This filter gets applied to the list of groups that is searched when Rancher attempts to add groups to a site access list or tries to add groups to clusters or projects. For example, a group search filter could be (|(cn=group1)(cn=group2)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of groups will be empty. | -| Group DN Attribute | The name of the group attribute whose format matches the values in the user attribute describing the user's memberships. See `User Member Attribute`. | -| Nested Group Membership | This setting defines whether Rancher should resolve nested group memberships. Use only if your organisation makes use of these nested memberships (ie. you have groups that contain other groups as members). | - ---- - -### Test Authentication - -Once you have completed the configuration, proceed by testing the connection to the AD server **using your AD admin account**. If the test is successful, authentication with the configured Active Directory will be enabled implicitly with the account you test with set as admin. - -> **Note:** -> -> The AD user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which AD account you use to perform this step. - -1. Enter the **username** and **password** for the AD account that should be mapped to the local principal account. -2. Click **Authenticate with Active Directory** to finalise the setup. - -**Result:** - -- Active Directory authentication has been enabled. -- You have been signed into Rancher as administrator using the provided AD credentials. - -> **Note:** -> -> You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. 
- -## Annex: Identify Search Base and Schema using ldapsearch - -In order to successfully configure AD authentication it is crucial that you provide the correct configuration pertaining to the hierarchy and schema of your AD server. - -The [`ldapsearch`](http://manpages.ubuntu.com/manpages/artful/man1/ldapsearch.1.html) tool allows you to query your AD server to learn about the schema used for user and group objects. - -For the purpose of the example commands provided below we will assume: - -- The Active Directory server has a hostname of `ad.acme.com` -- The server is listening for unencrypted connections on port `389` -- The Active Directory domain is `acme` -- You have a valid AD account with the username `jdoe` and password `secret` - -### Identify Search Base - -First we will use `ldapsearch` to identify the Distinguished Name (DN) of the parent node(s) for users and groups: - -``` -$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ --h ad.acme.com -b "dc=acme,dc=com" -s sub "sAMAccountName=jdoe" -``` - -This command performs an LDAP search with the search base set to the domain root (`-b "dc=acme,dc=com"`) and a filter targeting the user account (`sAMAccountName=jdoe`), returning the attributes for said user: - -{{< img "/img/rancher/ldapsearch-user.png" "LDAP User">}} - -Since in this case the user's DN is `CN=John Doe,CN=Users,DC=acme,DC=com` [5], we should configure the **User Search Base** with the parent node DN `CN=Users,DC=acme,DC=com`. - -Similarly, based on the DN of the group referenced in the **memberOf** attribute [4], the correct value for the **Group Search Base** would be the parent node of that value, ie. `OU=Groups,DC=acme,DC=com`. 
- -### Identify User Schema - -The output of the above `ldapsearch` query also allows us to determine the correct values to use in the user schema configuration: - -- `Object Class`: **person** [1] -- `Username Attribute`: **name** [2] -- `Login Attribute`: **sAMAccountName** [3] -- `User Member Attribute`: **memberOf** [4] - -> **Note:** -> -> If the AD users in our organisation were to authenticate with their UPN (e.g. jdoe@acme.com) instead of the short logon name, then we would have to set the `Login Attribute` to **userPrincipalName** instead. - -We'll also set the `Search Attribute` parameter to **sAMAccountName|name**. That way users can be added to clusters/projects in the Rancher UI either by entering their username or full name. - -### Identify Group Schema - -Next, we'll query one of the groups associated with this user, in this case `CN=examplegroup,OU=Groups,DC=acme,DC=com`: - -``` -$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ --h ad.acme.com -b "ou=groups,dc=acme,dc=com" \ --s sub "CN=examplegroup" -``` - -This command will inform us on the attributes used for group objects: - -{{< img "/img/rancher/ldapsearch-group.png" "LDAP Group">}} - -Again, this allows us to determine the correct values to enter in the group schema configuration: - -- `Object Class`: **group** [1] -- `Name Attribute`: **name** [2] -- `Group Member Mapping Attribute`: **member** [3] -- `Search Attribute`: **sAMAccountName** [4] - -Looking at the value of the **member** attribute, we can see that it contains the DN of the referenced user. This corresponds to the **distinguishedName** attribute in our user object. Accordingly we will have to set the value of the `Group Member User Attribute` parameter to this attribute. - -In the same way, we can observe that the value in the **memberOf** attribute in the user object corresponds to the **distinguishedName** [5] of the group. We therefore need to set the value for the `Group DN Attribute` parameter to this attribute. 
- -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the Active Directory server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/azure-ad/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/azure-ad/_index.md deleted file mode 100644 index 7353942296a..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/azure-ad/_index.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -title: Configuring Azure AD -weight: 5 ---- - -If you have an instance of Active Directory (AD) hosted in Azure, you can configure Rancher to allow your users to log in using their AD accounts. Configuration of Azure AD external authentication requires you to make configurations in both Azure and Rancher. - ->**Note:** Azure AD integration only supports Service Provider initiated logins. - ->**Prerequisite:** Have an instance of Azure AD configured. - ->**Note:** Most of this procedure takes place from the [Microsoft Azure Portal](https://portal.azure.com/). - -## Azure Active Directory Configuration Outline - -Configuring Rancher to allow your users to authenticate with their Azure AD accounts involves multiple procedures. Review the outline below before getting started. - - - ->**Tip:** Before you start, we recommend creating an empty text file. You can use this file to copy values from Azure that you'll paste into Rancher later. - - - -- [1. Register Rancher with Azure](#1-register-rancher-with-azure) -- [2. Create an Azure API Key](#2-create-an-azure-api-key) -- [3. 
Set Required Permissions for Rancher](#3-set-required-permissions-for-rancher) -- [4. Copy Azure Application Data](#4-copy-azure-application-data) -- [5. Configure Azure AD in Rancher](#5-configure-azure-ad-in-rancher) - - - -### 1. Register Rancher with Azure - -Before enabling Azure AD within Rancher, you must register Rancher with Azure. - -1. Log in to [Microsoft Azure](https://portal.azure.com/) as an administrative user. Configuration in future steps requires administrative access rights. - -1. Use search to open the **App registrations** service. - - ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) - -1. Click **New registrations** and complete the **Create** form. - - ![New App Registration]({{}}/img/rancher/new-app-registration.png) - - 1. Enter a **Name** (something like `Rancher`). - - 1. From **Supported account types**, select "Accounts in this organizational directory only (AzureADTest only - Single tenant)" This corresponds to the legacy app registration options. - - 1. In the **Redirect URI** section, make sure **Web** is selected from the dropdown and enter the URL of your Rancher Server in the text box next to the dropdown. This Rancher server URL should be appended with the verification path: `/verify-auth-azure`. - - >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). - - 1. Click **Register**. - ->**Note:** It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. - -### 2. Create a new client secret - -From the Azure portal, create a client secret. Rancher will use this key to authenticate with Azure AD. - -1. Use search to open **App registrations** services. Then open the entry for Rancher that you created in the last procedure. - - ![Open Rancher Registration]({{}}/img/rancher/open-rancher-app.png) - -1. 
From the navigation pane on left, click **Certificates and Secrets**. - -1. Click **New client secret**. - - ![Create new client secret]({{< baseurl >}}/img/rancher/select-client-secret.png) - - 1. Enter a **Description** (something like `Rancher`). - - 1. Select duration for the key from the options under **Expires**. This drop-down sets the expiration date for the key. Shorter durations are more secure, but require you to create a new key after expiration. - - 1. Click **Add** (you don't need to enter a value—it will automatically populate after you save). - - -1. Copy the key value and save it to an [empty text file](#tip). - - You'll enter this key into the Rancher UI later as your **Application Secret**. - - You won't be able to access the key value again within the Azure UI. - -### 3. Set Required Permissions for Rancher - -Next, set API permissions for Rancher within Azure. - -1. From the navigation pane on left, select **API permissions**. - - ![Open Required Permissions]({{}}/img/rancher/select-required-permissions.png) - -1. Click **Add a permission**. - -1. From the **Azure Active Directory Graph**, select the following **Delegated Permissions**: - - ![Select API Permissions]({{< baseurl >}}/img/rancher/select-required-permissions-2.png) - -
-
- - **Access the directory as the signed-in user** - - **Read directory data** - - **Read all groups** - - **Read all users' full profiles** - - **Read all users' basic profiles** - - **Sign in and read user profile** - -1. Click **Add permissions**. - -1. From **API permissions**, click **Grant admin consent**. Then click **Yes**. - - >**Note:** You must be signed in as an Azure administrator to successfully save your permission settings. - - -### 4. Add a Reply URL - -To use Azure AD with Rancher you must whitelist Rancher with Azure. You can complete this whitelisting by providing Azure with a reply URL for Rancher, which is your Rancher Server URL followed with a verification path. - - -1. From the **Setting** blade, select **Reply URLs**. - - ![Azure: Enter Reply URL]({{}}/img/rancher/enter-azure-reply-url.png) - -1. From the **Reply URLs** blade, enter the URL of your Rancher Server, appended with the verification path: `/verify-auth-azure`. - - >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). - -1. Click **Save**. - -**Result:** Your reply URL is saved. - ->**Note:** It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. - -### 5. Copy Azure Application Data - -As your final step in Azure, copy the data that you'll use to configure Rancher for Azure AD authentication and paste it into an empty text file. - -1. Obtain your Rancher **Tenant ID**. - - 1. Use search to open the **Azure Active Directory** service. - - ![Open Azure Active Directory]({{}}/img/rancher/search-azure-ad.png) - - 1. From the left navigation pane, open **Overview**. - - 2. Copy the **Directory ID** and paste it into your [text file](#tip). - - You'll paste this value into Rancher as your **Tenant ID**. - -1. Obtain your Rancher **Application ID**. - - 1. 
Use search to open **App registrations**. - - ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) - - 1. Find the entry you created for Rancher. - - 1. Copy the **Application ID** and paste it to your [text file](#tip). - -1. Obtain your Rancher **Graph Endpoint**, **Token Endpoint**, and **Auth Endpoint**. - - 1. From **App registrations**, click **Endpoints**. - - ![Click Endpoints]({{}}/img/rancher/click-endpoints.png) - - 2. Copy the following endpoints to your clipboard and paste them into your [text file](#tip) (these values will be your Rancher endpoint values). - - - **Microsoft Graph API endpoint** (Graph Endpoint) - - **OAuth 2.0 token endpoint (v1)** (Token Endpoint) - - **OAuth 2.0 authorization endpoint (v1)** (Auth Endpoint) - ->**Note:** Copy the v1 version of the endpoints - -### 5. Configure Azure AD in Rancher - -From the Rancher UI, enter information about your AD instance hosted in Azure to complete configuration. - -Enter the values that you copied to your [text file](#tip). - -1. Log into Rancher. From the **Global** view, select **Security > Authentication**. - -1. Select **Azure AD**. - -1. Complete the **Configure Azure AD Account** form using the information you copied while completing [Copy Azure Application Data](#5-copy-azure-application-data). - - >**Important:** When entering your Graph Endpoint, remove the tenant ID from the URL, like below. - > - >https://graph.windows.net/abb5adde-bee8-4821-8b03-e63efdc7701c - - The following table maps the values you copied in the Azure portal to the fields in Rancher. 
- - | Rancher Field | Azure Value | - | ------------------ | ------------------------------------- | - | Tenant ID | Directory ID | - | Application ID | Application ID | - | Application Secret | Key Value | - | Endpoint | https://login.microsoftonline.com/ | - | Graph Endpoint | Microsoft Azure AD Graph API Endpoint | - | Token Endpoint | OAuth 2.0 Token Endpoint | - | Auth Endpoint | OAuth 2.0 Authorization Endpoint | - -1. Click **Authenticate with Azure**. - -**Result:** Azure Active Directory authentication is configured. diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/freeipa/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/freeipa/_index.md deleted file mode 100644 index 8090d483fa6..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/freeipa/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Configuring FreeIPA -weight: 4 ---- - -If your organization uses FreeIPA for user authentication, you can configure Rancher to allow your users to login using their FreeIPA credentials. - ->**Prerequisites:** -> ->- You must have a [FreeIPA Server](https://www.freeipa.org/) configured. ->- Create a service account in FreeIPA with `read-only` access. Rancher uses this account to verify group membership when a user makes a request using an API key. ->- Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). - -2. From the **Global** view, select **Security > Authentication** from the main menu. - -3. Select **FreeIPA**. - -4. Complete the **Configure an FreeIPA server** form. - - You may need to log in to your domain controller to find the information requested in the form. 
- - >**Using TLS?** - >If the certificate is self-signed or not from a recognized certificate authority, make sure you provide the complete chain. That chain is needed to verify the server's certificate. -
-
-
 >**User Search Base vs. Group Search Base** - > - >Search base allows Rancher to search for users and groups that are in your FreeIPA. These fields are only for search bases and not for search filters. - > - >* If your users and groups are in the same search base, complete only the User Search Base. - >* If your groups are in a different search base, you can optionally complete the Group Search Base. This field is dedicated to searching groups, but is not required. - -5. If your FreeIPA deviates from the standard AD schema, complete the **Customize Schema** form to match it. Otherwise, skip this step. - - >**Search Attribute** The Search Attribute field defaults with three specific values: `uid|sn|givenName`. After FreeIPA is configured, when a user enters text to add users or groups, Rancher automatically queries the FreeIPA server and attempts to match fields by user id, last name, or first name. Rancher specifically searches for users/groups that begin with the text entered in the search field. - > - >The default field value is `uid|sn|givenName`, but you can configure this field to a subset of these fields. The pipe (`|`) between the fields separates these fields. - > - > * `uid`: User ID - > * `sn`: Last Name - > * `givenName`: First Name - > - > With this search attribute, Rancher creates search filters for users and groups, but you *cannot* add your own search filters in this field. - -6. Enter your FreeIPA username and password in **Authenticate with FreeIPA** to confirm that Rancher is configured to use FreeIPA authentication. - -**Result:** - -- FreeIPA authentication is configured. -- You are signed into Rancher with your FreeIPA account (i.e., the _external principal_). 
diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/github/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/github/_index.md deleted file mode 100644 index fbd5a0cd90a..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/github/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Configuring GitHub -weight: 6 ---- - -In environments using GitHub, you can configure Rancher to allow sign on using GitHub credentials. - ->**Prerequisites:** Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). - -2. From the **Global** view, select **Security > Authentication** from the main menu. - -3. Select **GitHub**. - -4. Follow the directions displayed to **Setup a GitHub Application**. Rancher redirects you to GitHub to complete registration. - - >**What's an Authorization Callback URL?** - > - >The Authorization Callback URL is the URL where users go to begin using your application (i.e. the splash screen). - - >When you use external authentication, authentication does not actually take place in your application. Instead, authentication takes place externally (in this case, GitHub). After this external authentication completes successfully, the Authorization Callback URL is the location where the user re-enters your application. - -5. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. - - >**Where do I find the Client ID and Client Secret?** - > - >From GitHub, select Settings > Developer Settings > OAuth Apps. The Client ID and Client Secret are displayed prominently. - -6. Click **Authenticate with GitHub**. - -7. Use the **Site Access** options to configure the scope of user authorization. 
- - - **Allow any valid Users** - - _Any_ GitHub user can access Rancher. We generally discourage use of this setting! - - - **Allow members of Clusters, Projects, plus Authorized Users and Organizations** - - Any GitHub user or group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any GitHub user or group you add to the **Authorized Users and Organizations** list may log in to Rancher. - - - **Restrict access to only Authorized Users and Organizations** - - Only GitHub users or groups added to the Authorized Users and Organizations can log in to Rancher. -
-8. Click **Save**. - -**Result:** - -- GitHub authentication is configured. -- You are signed into Rancher with your GitHub account (i.e., the _external principal_). diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/google/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/google/_index.md deleted file mode 100644 index 1c77c5354b2..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/google/_index.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: Configuring Google OAuth -weight: 12 ---- - -If your organization uses G Suite for user authentication, you can configure Rancher to allow your users to log in using their G Suite credentials. - -Only admins of the G Suite domain have access to the Admin SDK. Therefore, only G Suite admins can configure Google OAuth for Rancher. - -Within Rancher, only administrators or users with the **Manage Authentication** [global role]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) can configure authentication. - -# Prerequisites -- You must have a [G Suite admin account](https://admin.google.com) configured. -- G Suite requires a [top private domain FQDN](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) as an authorized domain. One way to get an FQDN is by creating an A-record in Route53 for your Rancher server. You do not need to update your Rancher Server URL setting with that record, because there could be clusters using that URL. -- You must have the Admin SDK API enabled for your G Suite domain. 
You can enable it using the steps on [this page.](https://support.google.com/a/answer/60757?hl=en) - -After the Admin SDK API is enabled, your G Suite domain's API screen should look like this: -![Enable Admin APIs]({{}}/img/rancher/Google-Enable-APIs-Screen.png) - -# Setting up G Suite for OAuth with Rancher -Before you can set up Google OAuth in Rancher, you need to log in to your G Suite account and do the following: - -1. [Add Rancher as an authorized domain in G Suite](#1-adding-rancher-as-an-authorized-domain) -1. [Generate OAuth2 credentials for the Rancher server](#2-creating-oauth2-credentials-for-the-rancher-server) -1. [Create service account credentials for the Rancher server](#3-creating-service-account-credentials) -1. [Register the service account key as an OAuth Client](#4-register-the-service-account-key-as-an-oauth-client) - -### 1. Adding Rancher as an Authorized Domain -1. Click [here](https://console.developers.google.com/apis/credentials) to go to the credentials page of your Google domain. -1. Select your project and click **OAuth consent screen.** -![OAuth Consent Screen]({{}}/img/rancher/Google-OAuth-consent-screen-tab.png) -1. Go to **Authorized Domains** and enter the top private domain of your Rancher server URL in the list. The top private domain is the rightmost superdomain. So for example, www.foo.co.uk has a top private domain of foo.co.uk. For more information on top-level domains, refer to [this article.](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) -1. Go to **Scopes for Google APIs** and make sure **email,** **profile** and **openid** are enabled. - -**Result:** Rancher has been added as an authorized domain for the Admin SDK API. - -### 2. Creating OAuth2 Credentials for the Rancher Server -1. 
Go to the Google API console, select your project, and go to the [credentials page.](https://console.developers.google.com/apis/credentials) -![Credentials]({{}}/img/rancher/Google-Credentials-tab.png) -1. On the **Create Credentials** dropdown, select **OAuth client ID.** -1. Click **Web application.** -1. Provide a name. -1. Fill out the **Authorized JavaScript origins** and **Authorized redirect URIs.** Note: The Rancher UI page for setting up Google OAuth (available from the Global view under **Security > Authentication > Google**) provides you the exact links to enter for this step. - - Under **Authorized JavaScript origins,** enter your Rancher server URL. - - Under **Authorized redirect URIs,** enter your Rancher server URL appended with the path `verify-auth`. For example, if your URI is `https://rancherServer`, you will enter `https://rancherServer/verify-auth`. -1. Click on **Create.** -1. After the credential is created, you will see a screen with a list of your credentials. Choose the credential you just created, and in that row on rightmost side, click **Download JSON.** Save the file so that you can provide these credentials to Rancher. - -**Result:** Your OAuth credentials have been successfully created. - -### 3. Creating Service Account Credentials -Since the Google Admin SDK is available only to admins, regular users cannot use it to retrieve profiles of other users or their groups. Regular users cannot even retrieve their own groups. - -Since Rancher provides group-based membership access, we require the users to be able to get their own groups, and look up other users and groups when needed. - -As a workaround to get this capability, G Suite recommends creating a service account and delegating authority of your G Suite domain to that service account. - -This section describes how to: - -- Create a service account -- Create a key for the service account and download the credentials as JSON - -1. 
Click [here](https://console.developers.google.com/iam-admin/serviceaccounts) and select your project for which you generated OAuth credentials. -1. Click on **Create Service Account.** -1. Enter a name and click **Create.** -![Service account creation Step 1]({{}}/img/rancher/Google-svc-acc-step1.png) -1. Don't provide any roles on the **Service account permissions** page and click **Continue** -![Service account creation Step 2]({{}}/img/rancher/Google-svc-acc-step2.png) -1. Click on **Create Key** and select the JSON option. Download the JSON file and save it so that you can provide it as the service account credentials to Rancher. -![Service account creation Step 3]({{}}/img/rancher/Google-svc-acc-step3-key-creation.png) - -**Result:** Your service account is created. - -### 4. Register the Service Account Key as an OAuth Client - -You will need to grant some permissions to the service account you created in the last step. Rancher requires you to grant only read-only permissions for users and groups. - -Using the Unique ID of the service account key, register it as an Oauth Client using the following steps: - -1. Get the Unique ID of the key you just created. If it's not displayed in the list of keys right next to the one you created, you will have to enable it. To enable it, click **Unique ID** and click **OK.** This will add a **Unique ID** column to the list of service account keys. Save the one listed for the service account you created. NOTE: This is a numeric key, not to be confused with the alphanumeric field **Key ID.** - - ![Service account Unique ID]({{}}/img/rancher/Google-Select-UniqueID-column.png) -1. Go to the [**Manage OAuth Client Access** page.](https://admin.google.com/AdminHome?chromeless=1#OGX:ManageOauthClients) -1. Add the Unique ID obtained in the previous step in the **Client Name** field. -1. 
In the **One or More API Scopes** field, add the following scopes: - ``` - openid,profile,email,https://www.googleapis.com/auth/admin.directory.user.readonly,https://www.googleapis.com/auth/admin.directory.group.readonly - ``` -1. Click **Authorize.** - -**Result:** The service account is registered as an OAuth client in your G Suite account. - -# Configuring Google OAuth in Rancher -1. Sign into Rancher using a local user assigned the [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions) role. This user is also called the local principal. -1. From the **Global** view, click **Security > Authentication** from the main menu. -1. Click **Google.** The instructions in the UI cover the steps to set up authentication with Google OAuth. - 1. Admin Email: Provide the email of an administrator account from your GSuite setup. In order to perform user and group lookups, google apis require an administrator's email in conjunction with the service account key. - 1. Domain: Provide the domain on which you have configured GSuite. Provide the exact domain and not any aliases. - 1. Nested Group Membership: Check this box to enable nested group memberships. Rancher admins can disable this at any time after configuring auth. - - **Step One** is about adding Rancher as an authorized domain, which we already covered in [this section.](#1-adding-rancher-as-an-authorized-domain) - - For **Step Two,** provide the OAuth credentials JSON that you downloaded after completing [this section.](#2-creating-oauth2-credentials-for-the-rancher-server) You can upload the file or paste the contents into the **OAuth Credentials** field. - - For **Step Three,** provide the service account credentials JSON that downloaded at the end of [this section.](#3-creating-service-account-credentials) The credentials will only work if you successfully [registered the service account key](#4-register-the-service-account-key-as-an-oauth-client) as an OAuth client in your G Suite account. -1. 
Click **Authenticate with Google**. -1. Click **Save**. - -**Result:** Google authentication is successfully configured. diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/keycloak/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/keycloak/_index.md deleted file mode 100644 index f73660636f0..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/keycloak/_index.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: Configuring Keycloak (SAML) -description: Create a Keycloak SAML client and configure Rancher to work with Keycloak. By the end your users will be able to sign into Rancher using their Keycloak logins -weight: 7 ---- - -If your organization uses Keycloak Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - -## Prerequisites - -- You must have a [Keycloak IdP Server](https://www.keycloak.org/docs/latest/server_installation/) configured. -- In Keycloak, create a [new SAML client](https://www.keycloak.org/docs/latest/server_admin/#saml-clients), with the settings below. See the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/#saml-clients) for help. - - Setting | Value - ------------|------------ - `Sign Documents` | `ON` 1 - `Sign Assertions` | `ON` 1 - All other `ON/OFF` Settings | `OFF` - `Client ID` | `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata`2 - `Client Name` | (e.g. `rancher`) - `Client Protocol` | `SAML` - `Valid Redirect URI` | `https://yourRancherHostURL/v1-saml/keycloak/saml/acs` - - >1: Optionally, you can enable either one or both of these settings. - >2: Rancher SAML metadata won't be generated until a SAML provider is configured and saved. -- Export a `metadata.xml` file from your Keycloak client: - From the `Installation` tab, choose the `SAML Metadata IDPSSODescriptor` format option and download your file. - - -## Configuring Keycloak in Rancher - - -1. 
From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Keycloak**. - -1. Complete the **Configure Keycloak Account** form. Keycloak IdP lets you specify what data store you want to use. You can either add a database or use an existing LDAP server. For example, if you select your Active Directory (AD) server, the examples below describe how you can map AD attributes to fields within Rancher. - - - | Field | Description | - | ------------------------- | ----------------------------------------------------------------------------- | - | Display Name Field | The AD attribute that contains the display name of users. | - | User Name Field | The AD attribute that contains the user name/given name. | - | UID Field | An AD attribute that is unique to every user. | - | Groups Field | Make entries for managing group memberships. | - | Rancher API Host | The URL for your Rancher Server. | - | Private Key / Certificate | A key/certificate pair to create a secure shell between Rancher and your IdP. | - | IDP-metadata | The `metadata.xml` file that you exported from your IdP server. | - - >**Tip:** You can generate a key/certificate pair using an openssl command. For example: - > - > openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.cert - - -1. After you complete the **Configure Keycloak Account** form, click **Authenticate with Keycloak**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Keycloak IdP to validate your Rancher Keycloak configuration. - - >**Note:** You may have to disable your popup blocker to see the IdP login page. - -**Result:** Rancher is configured to work with Keycloak. Your users can now sign into Rancher using their Keycloak logins. 
- -{{< saml_caveats >}} - -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. - -### You are not redirected to Keycloak - -When you click on **Authenticate with Keycloak**, your are not redirected to your IdP. - - * Verify your Keycloak client configuration. - * Make sure `Force Post Binding` set to `OFF`. - - -### Forbidden message displayed after IdP login - -You are correctly redirected to your IdP login page and you are able to enter your credentials, however you get a `Forbidden` message afterwards. - - * Check the Rancher debug log. - * If the log displays `ERROR: either the Response or Assertion must be signed`, make sure either `Sign Documents` or `Sign assertions` is set to `ON` in your Keycloak client. - -### HTTP 502 when trying to access /v1-saml/keycloak/saml/metadata - -This is usually due to the metadata not being created until a SAML provider is configured. -Try configuring and saving keycloak as your SAML provider and then accessing the metadata. - -### Keycloak Error: "We're sorry, failed to process response" - - * Check your Keycloak log. - * If the log displays `failed: org.keycloak.common.VerificationException: Client does not have a public key`, set `Encrypt Assertions` to `OFF` in your Keycloak client. - -### Keycloak Error: "We're sorry, invalid requester" - - * Check your Keycloak log. - * If the log displays `request validation failed: org.keycloak.common.VerificationException: SigAlg was null`, set `Client Signature Required` to `OFF` in your Keycloak client. 
- -### Keycloak 6.0.0+: IDPSSODescriptor missing from options - -Keycloak versions 6.0.0 and up no longer provide the IDP metadata under the `Installation` tab. -You can still get the XML from the following url: - -`https://{KEYCLOAK-URL}/auth/realms/{REALM-NAME}/protocol/saml/descriptor` - -The XML obtained from this URL contains `EntitiesDescriptor` as the root element. Rancher expects the root element to be `EntityDescriptor` rather than `EntitiesDescriptor`. So before passing this XML to Rancher, follow these steps to adjust it: - - * Copy all the tags from `EntitiesDescriptor` to the `EntityDescriptor`. - * Remove the `` tag from the beginning. - * Remove the `` from the end of the xml. - -You are left with something similar as the example below: - -``` - - .... - - -``` diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/local/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/local/_index.md deleted file mode 100644 index 8f973b8e26f..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/local/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Local Authentication -weight: 1 ---- - -Local authentication is the default until you configure an external authentication provider. Local authentication is where Rancher stores the user information, i.e. names and passwords, of who can log in to Rancher. By default, the `admin` user that logs in to Rancher for the first time is a local user. - -## Adding Local Users - -Regardless of whether you use external authentication, you should create a few local authentication users so that you can continue using Rancher if your external authentication service encounters issues. - -1. From the **Global** view, select **Users** from the navigation bar. - -2. Click **Add User**. Then complete the **Add User** form. Click **Create** when you're done. 
diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/_index.md deleted file mode 100644 index 8ddd77b8d85..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Configuring Microsoft Active Directory Federation Service (SAML) -weight: 9 ---- - -If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. - -## Prerequisites - - -- You must have Rancher installed. - - - Obtain your Rancher Server URL. During AD FS configuration, substitute this URL for the `` placeholder. - - - You must have a global administrator account on your Rancher installation. - -- You must have a [Microsoft AD FS Server](https://docs.microsoft.com/en-us/windows-server/identity/active-directory-federation-services) configured. - - - Obtain your AD FS Server IP/DNS name. During AD FS configuration, substitute this IP/DNS name for the `` placeholder. - - - You must have access to add [Relying Party Trusts](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/create-a-relying-party-trust) on your AD FS Server. - - - -## Setup Outline - -Setting up Microsoft AD FS with Rancher Server requires configuring AD FS on your Active Directory server, and configuring Rancher to utilize your AD FS server. The following pages serve as guides for setting up Microsoft AD FS authentication on your Rancher installation. 
- -- [1 — Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) -- [2 — Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup) - -{{< saml_caveats >}} - - -### [Next: Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md deleted file mode 100644 index 152834ec60c..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: 1 — Configuring Microsoft AD FS for Rancher -weight: 1205 ---- - -Before configuring Rancher to support AD FS users, you must add Rancher as a [relying party trust](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/technical-reference/understanding-key-ad-fs-concepts) in AD FS. - -1. Log into your AD server as an administrative user. - -1. Open the **AD FS Management** console. Select **Add Relying Party Trust...** from the **Actions** menu and click **Start**. - - {{< img "/img/rancher/adfs/adfs-overview.png" "">}} - -1. Select **Enter data about the relying party manually** as the option for obtaining data about the relying party. - - {{< img "/img/rancher/adfs/adfs-add-rpt-2.png" "">}} - -1. Enter your desired **Display name** for your Relying Party Trust. For example, `Rancher`. - - {{< img "/img/rancher/adfs/adfs-add-rpt-3.png" "">}} - -1. Select **AD FS profile** as the configuration profile for your relying party trust. - - {{< img "/img/rancher/adfs/adfs-add-rpt-4.png" "">}} - -1. Leave the **optional token encryption certificate** empty, as Rancher AD FS will not be using one. 
- - {{< img "/img/rancher/adfs/adfs-add-rpt-5.png" "">}} - -1. Select **Enable support for the SAML 2.0 WebSSO protocol** - and enter `https:///v1-saml/adfs/saml/acs` for the service URL. - - {{< img "/img/rancher/adfs/adfs-add-rpt-6.png" "">}} - -1. Add `https:///v1-saml/adfs/saml/metadata` as the **Relying party trust identifier**. - - {{< img "/img/rancher/adfs/adfs-add-rpt-7.png" "">}} - -1. This tutorial will not cover multi-factor authentication; please refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-additional-authentication-methods-for-ad-fs) if you would like to configure multi-factor authentication. - - {{< img "/img/rancher/adfs/adfs-add-rpt-8.png" "">}} - -1. From **Choose Issuance Authorization RUles**, you may select either of the options available according to use case. However, for the purposes of this guide, select **Permit all users to access this relying party**. - - {{< img "/img/rancher/adfs/adfs-add-rpt-9.png" "">}} - -1. After reviewing your settings, select **Next** to add the relying party trust. - - {{< img "/img/rancher/adfs/adfs-add-rpt-10.png" "">}} - - -1. Select **Open the Edit Claim Rules...** and click **Close**. - - {{< img "/img/rancher/adfs/adfs-add-rpt-11.png" "">}} - -1. On the **Issuance Transform Rules** tab, click **Add Rule...**. - - {{< img "/img/rancher/adfs/adfs-edit-cr.png" "">}} - -1. Select **Send LDAP Attributes as Claims** as the **Claim rule template**. - - {{< img "/img/rancher/adfs/adfs-add-tcr-1.png" "">}} - -1. Set the **Claim rule name** to your desired name (for example, `Rancher Attributes`) and select **Active Directory** as the **Attribute store**. 
Create the following mapping to reflect the table below: - - | LDAP Attribute | Outgoing Claim Type | - | -------------------------------------------- | ------------------- | - | Given-Name | Given Name | - | User-Principal-Name | UPN | - | Token-Groups - Qualified by Long Domain Name | Group | - | SAM-Account-Name | Name | -
- {{< img "/img/rancher/adfs/adfs-add-tcr-2.png" "">}} - -1. Download the `federationmetadata.xml` from your AD server at: -``` -https:///federationmetadata/2007-06/federationmetadata.xml -``` - -**Result:** You've added Rancher as a relying trust party. Now you can configure Rancher to leverage AD. - -### [Next: Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/) diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/rancher-adfs-setup/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/rancher-adfs-setup/_index.md deleted file mode 100644 index 3feb62b8581..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/microsoft-adfs/rancher-adfs-setup/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: 2 — Configuring Rancher for Microsoft AD FS -weight: 1205 ---- - -After you complete [Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. - ->**Important Notes For Configuring Your AD FS Server:** -> ->- The SAML 2.0 WebSSO Protocol Service URL is: `https:///v1-saml/adfs/saml/acs` ->- The Relying Party Trust identifier URL is: `https:///v1-saml/adfs/saml/metadata` ->- You must export the `federationmetadata.xml` file from your AD FS server. This can be found at: `https:///federationmetadata/2007-06/federationmetadata.xml` - - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Microsoft Active Directory Federation Services**. - -1. Complete the **Configure AD FS Account** form. Microsoft AD FS lets you specify an existing Active Directory (AD) server. The examples below describe how you can map AD attributes to fields within Rancher. 
- - | Field | Description | - | ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | - | Display Name Field | The AD attribute that contains the display name of users.

Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name` | - | User Name Field | The AD attribute that contains the user name/given name.

Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname` | - | UID Field | An AD attribute that is unique to every user.

Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn` | - | Groups Field | Make entries for managing group memberships.

Example: `http://schemas.xmlsoap.org/claims/Group` | - | Rancher API Host | The URL for your Rancher Server. | - | Private Key / Certificate | This is a key-certificate pair to create a secure shell between Rancher and your AD FS. Ensure you set the Common Name (CN) to your Rancher Server URL.

[Certificate creation command](#cert-command) | - | Metadata XML | The `federationmetadata.xml` file exported from your AD FS server.

You can find this file at `https:///federationmetadata/2007-06/federationmetadata.xml`. | - - - >**Tip:** You can generate a certificate using an openssl command. For example: - > - > openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - - - -1. After you complete the **Configure AD FS Account** form, click **Authenticate with AD FS**, which is at the bottom of the page. - - Rancher redirects you to the AD FS login page. Enter credentials that authenticate with Microsoft AD FS to validate your Rancher AD FS configuration. - - >**Note:** You may have to disable your popup blocker to see the AD FS login page. - -**Result:** Rancher is configured to work with MS FS. Your users can now sign into Rancher using their MS FS logins. diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/okta/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/okta/_index.md deleted file mode 100644 index 53f37810ab4..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/okta/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Configuring Okta (SAML) -weight: 10 ---- - -If your organization uses Okta Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - ->**Note:** Okta integration only supports Service Provider initiated logins. - -## Prerequisites - -In Okta, create a SAML Application with the settings below. See the [Okta documentation](https://developer.okta.com/standards/SAML/setting_up_a_saml_application_in_okta) for help. - -Setting | Value -------------|------------ -`Single Sign on URL` | `https://yourRancherHostURL/v1-saml/okta/saml/acs` -`Audience URI (SP Entity ID)` | `https://yourRancherHostURL/v1-saml/okta/saml/metadata` - -## Configuring Okta in Rancher - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Okta**. 
- -1. Complete the **Configure Okta Account** form. The examples below describe how you can map Okta attributes from attribute statements to fields within Rancher. - - | Field | Description | - | ------------------------- | ----------------------------------------------------------------------------- | - | Display Name Field | The attribute name from an attribute statement that contains the display name of users. | - | User Name Field | The attribute name from an attribute statement that contains the user name/given name. | - | UID Field | The attribute name from an attribute statement that is unique to every user. | - | Groups Field | The attribute name in a group attribute statement that exposes your groups. | - | Rancher API Host | The URL for your Rancher Server. | - | Private Key / Certificate | A key/certificate pair used for Assertion Encryption. | - | Metadata XML | The `Identity Provider metadata` file that you find in the application `Sign On` section. | - - >**Tip:** You can generate a key/certificate pair using an openssl command. For example: - > - > openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.crt - - - -1. After you complete the **Configure Okta Account** form, click **Authenticate with Okta**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Okta IdP to validate your Rancher Okta configuration. - - >**Note:** If nothing seems to happen, it's likely because your browser blocked the pop-up. Make sure you disable the pop-up blocker for your rancher domain and whitelist it in any other extensions you might utilize. - -**Result:** Rancher is configured to work with Okta. Your users can now sign into Rancher using their Okta logins. 
- -{{< saml_caveats >}} diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/openldap/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/openldap/_index.md deleted file mode 100644 index 3ee68476cab..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/openldap/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Configuring OpenLDAP -weight: 3 ---- - -If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. - -## Prerequisites - -Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognised certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -## Configure OpenLDAP in Rancher - -Configure the settings for the OpenLDAP server, groups and users. 
For help filling out each field, refer to the [configuration reference.](./openldap-config) - -> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Log into the Rancher UI using the initial local `admin` account. -2. From the **Global** view, navigate to **Security** > **Authentication** -3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. - -### Test Authentication - -Once you have completed the configuration, proceed by testing the connection to the OpenLDAP server. Authentication with OpenLDAP will be enabled implicitly if the test is successful. - -> **Note:** -> -> The OpenLDAP user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which LDAP account you use to perform this step. - -1. Enter the **username** and **password** for the OpenLDAP account that should be mapped to the local principal account. -2. Click **Authenticate With OpenLDAP** to test the OpenLDAP connection and finalise the setup. - -**Result:** - -- OpenLDAP authentication is configured. -- The LDAP user pertaining to the entered credentials is mapped to the local principal (administrative) account. - -> **Note:** -> -> You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. - -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. 
Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/openldap/openldap-config/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/openldap/openldap-config/_index.md deleted file mode 100644 index addd6773a60..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/openldap/openldap-config/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: OpenLDAP Configuration Reference -weight: 2 ---- - -This section is intended to be used as a reference when setting up an OpenLDAP authentication provider in Rancher. - -For further details on configuring OpenLDAP, refer to the [official documentation.](https://www.openldap.org/doc/) - -> Before you proceed with the configuration, please familiarize yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -- [Background: OpenLDAP Authentication Flow](#background-openldap-authentication-flow) -- [OpenLDAP server configuration](#openldap-server-configuration) -- [User/group schema configuration](#user-group-schema-configuration) - - [User schema configuration](#user-schema-configuration) - - [Group schema configuration](#group-schema-configuration) - -## Background: OpenLDAP Authentication Flow - -1. When a user attempts to login with his LDAP credentials, Rancher creates an initial bind to the LDAP server using a service account with permissions to search the directory and read user/group attributes. -2. Rancher then searches the directory for the user by using a search filter based on the provided username and configured attribute mappings. -3. 
Once the user has been found, he is authenticated with another LDAP bind request using the user's DN and provided password. -4. Once authentication succeeded, Rancher then resolves the group memberships both from the membership attribute in the user's object and by performing a group search based on the configured user mapping attribute. - -# OpenLDAP Server Configuration - -You will need to enter the address, port, and protocol to connect to your OpenLDAP server. `389` is the standard port for insecure traffic, `636` for TLS traffic. - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -If you are in doubt about the correct values to enter in the user/group Search Base configuration fields, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. - -
OpenLDAP Server Parameters
- -| Parameter | Description | -|:--|:--| -| Hostname | Specify the hostname or IP address of the OpenLDAP server | -| Port | Specify the port at which the OpenLDAP server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| -| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS). You will also need to paste in the CA certificate if the server uses a self-signed/enterprise-signed certificate. | -| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the server unreachable. | -| Service Account Distinguished Name | Enter the Distinguished Name (DN) of the user that should be used to bind, search and retrieve LDAP entries. (see [Prerequisites](#prerequisites)). | -| Service Account Password | The password for the service account. | -| User Search Base | Enter the Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendants of this base DN. For example: "ou=people,dc=acme,dc=com".| -| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave this field empty. For example: "ou=groups,dc=acme,dc=com".| - -# User/Group Schema Configuration - -If your OpenLDAP directory deviates from the standard OpenLDAP schema, you must complete the **Customize Schema** section to match it. - -Note that the attribute mappings configured in this section are used by Rancher to construct search filters and resolve group membership. It is therefore always recommended to verify that the configuration here matches the schema used in your OpenLDAP. 
- -If you are unfamiliar with the user/group schema used in the OpenLDAP server, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. - -### User Schema Configuration - -The table below details the parameters for the user schema configuration. - -
User Schema Configuration Parameters
- -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Username Attribute | The user attribute whose value is suitable as a display name. | -| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. This is typically `uid`. | -| User Member Attribute | The user attribute containing the Distinguished Name of groups a user is member of. Usually this is one of `memberOf` or `isMemberOf`. | -| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the LDAP server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. | -| User Enabled Attribute | If the schema of your OpenLDAP server supports a user attribute whose value can be evaluated to determine if the account is disabled or locked, enter the name of that attribute. The default OpenLDAP schema does not support this and the field should usually be left empty. | -| Disabled Status Bitmask | This is the value for a disabled/locked user account. The parameter is ignored if `User Enabled Attribute` is empty. | - -### Group Schema Configuration - -The table below details the parameters for the group schema configuration. - -
Group Schema Configuration Parameters
- -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for group entries in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Name Attribute | The group attribute whose value is suitable for a display name. | -| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | -| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | -| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects in the UI. See description of user schema `Search Attribute`. | -| Group DN Attribute | The name of the group attribute whose format matches the values in the user's group membership attribute. See `User Member Attribute`. | -| Nested Group Membership | This setting defines whether Rancher should resolve nested group memberships. Use only if your organization makes use of these nested memberships (i.e. you have groups that contain other groups as members). This option is disabled if you are using Shibboleth. | \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/ping-federate/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/ping-federate/_index.md deleted file mode 100644 index 1deef3746a5..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/ping-federate/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Configuring PingIdentity (SAML) -weight: 8 ---- - -If your organization uses Ping Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - ->**Prerequisites:** -> ->- You must have a [Ping IdP Server](https://www.pingidentity.com/) configured. 
->- Following are the Rancher Service Provider URLs needed for configuration: -Metadata URL: `https:///v1-saml/ping/saml/metadata` -Assertion Consumer Service (ACS) URL: `https:///v1-saml/ping/saml/acs` -Note that these URLs will not return valid data until the authentication configuration is saved in Rancher. ->- Export a `metadata.xml` file from your IdP Server. For more information, see the [PingIdentity documentation](https://documentation.pingidentity.com/pingfederate/pf83/index.shtml#concept_exportingMetadata.html). - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **PingIdentity**. - -1. Complete the **Configure Ping Account** form. Ping IdP lets you specify what data store you want to use. You can either add a database or use an existing ldap server. For example, if you select your Active Directory (AD) server, the examples below describe how you can map AD attributes to fields within Rancher. - - 1. **Display Name Field**: Enter the AD attribute that contains the display name of users (example: `displayName`). - - 1. **User Name Field**: Enter the AD attribute that contains the user name/given name (example: `givenName`). - - 1. **UID Field**: Enter an AD attribute that is unique to every user (example: `sAMAccountName`, `distinguishedName`). - - 1. **Groups Field**: Make entries for managing group memberships (example: `memberOf`). - - 1. **Rancher API Host**: Enter the URL for your Rancher Server. - - 1. **Private Key** and **Certificate**: This is a key-certificate pair to create a secure shell between Rancher and your IdP. - - You can generate one using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 1. 
**IDP-metadata**: The `metadata.xml` file that you [exported from your IdP server](https://documentation.pingidentity.com/pingfederate/pf83/index.shtml#concept_exportingMetadata.html). - - -1. After you complete the **Configure Ping Account** form, click **Authenticate with Ping**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Ping IdP to validate your Rancher PingIdentity configuration. - - >**Note:** You may have to disable your popup blocker to see the IdP login page. - -**Result:** Rancher is configured to work with PingIdentity. Your users can now sign into Rancher using their PingIdentity logins. - -{{< saml_caveats >}} diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/shibboleth/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/shibboleth/_index.md deleted file mode 100644 index f31a13226fc..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/shibboleth/_index.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: Configuring Shibboleth (SAML) -weight: 11 ---- - -If your organization uses Shibboleth Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in to Rancher using their Shibboleth credentials. - -In this configuration, when Rancher users log in, they will be redirected to the Shibboleth IdP to enter their credentials. After authentication, they will be redirected back to the Rancher UI. - -If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then the authenticated user will be able to access resources in Rancher that their groups have permissions for. - -> The instructions in this section assume that you understand how Rancher, Shibboleth, and OpenLDAP work together. 
For a more detailed explanation of how it works, refer to [this page.](./about) - -This section covers the following topics: - -- [Setting up Shibboleth in Rancher](#setting-up-shibboleth-in-rancher) - - [Shibboleth Prerequisites](#shibboleth-prerequisites) - - [Configure Shibboleth in Rancher](#configure-shibboleth-in-rancher) - - [SAML Provider Caveats](#saml-provider-caveats) -- [Setting up OpenLDAP in Rancher](#setting-up-openldap-in-rancher) - - [OpenLDAP Prerequisites](#openldap-prerequisites) - - [Configure OpenLDAP in Rancher](#configure-openldap-in-rancher) - - [Troubleshooting](#troubleshooting) - -# Setting up Shibboleth in Rancher - -### Shibboleth Prerequisites -> ->- You must have a Shibboleth IdP Server configured. ->- Following are the Rancher Service Provider URLs needed for configuration: -Metadata URL: `https:///v1-saml/shibboleth/saml/metadata` -Assertion Consumer Service (ACS) URL: `https:///v1-saml/shibboleth/saml/acs` ->- Export a `metadata.xml` file from your IdP Server. For more information, see the [Shibboleth documentation.](https://wiki.shibboleth.net/confluence/display/SP3/Home) - -### Configure Shibboleth in Rancher -If your organization uses Shibboleth for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Shibboleth**. - -1. Complete the **Configure Shibboleth Account** form. Shibboleth IdP lets you specify what data store you want to use. You can either add a database or use an existing ldap server. For example, if you select your Active Directory (AD) server, the examples below describe how you can map AD attributes to fields within Rancher. - - 1. **Display Name Field**: Enter the AD attribute that contains the display name of users (example: `displayName`). - - 1. **User Name Field**: Enter the AD attribute that contains the user name/given name (example: `givenName`). 
- - 1. **UID Field**: Enter an AD attribute that is unique to every user (example: `sAMAccountName`, `distinguishedName`). - - 1. **Groups Field**: Make entries for managing group memberships (example: `memberOf`). - - 1. **Rancher API Host**: Enter the URL for your Rancher Server. - - 1. **Private Key** and **Certificate**: This is a key-certificate pair to create a secure shell between Rancher and your IdP. - - You can generate one using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 1. **IDP-metadata**: The `metadata.xml` file that you exported from your IdP server. - - -1. After you complete the **Configure Shibboleth Account** form, click **Authenticate with Shibboleth**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Shibboleth IdP to validate your Rancher Shibboleth configuration. - - >**Note:** You may have to disable your popup blocker to see the IdP login page. - -**Result:** Rancher is configured to work with Shibboleth. Your users can now sign into Rancher using their Shibboleth logins. - -### SAML Provider Caveats - -If you configure Shibboleth without OpenLDAP, the following caveats apply due to the fact that SAML Protocol does not support search or lookup for users or groups. - -- There is no validation on users or groups when assigning permissions to them in Rancher. -- When adding users, the exact user IDs (i.e. UID Field) must be entered correctly. As you type the user ID, there will be no search for other user IDs that may match. -- When adding groups, you must select the group from the drop-down that is next to the text box. Rancher assumes that any input from the text box is a user. -- The group drop-down shows only the groups that you are a member of. You will not be able to add groups that you are not a member of. 
- -To enable searching for groups when assigning permissions in Rancher, you will need to configure a back end for the SAML provider that supports groups, such as OpenLDAP. - -# Setting up OpenLDAP in Rancher - -If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then authenticated users will be able to access resources in Rancher that their groups have permissions for. - -### OpenLDAP Prerequisites - -Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -### Configure OpenLDAP in Rancher - -Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.]({{}}/rancher/v2.x/en/admin-settings/authentication/openldap/openldap-config) Note that nested group membership is not available for Shibboleth. - -> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Log into the Rancher UI using the initial local `admin` account. -2. 
From the **Global** view, navigate to **Security** > **Authentication** -3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. - -# Troubleshooting - -If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpoint the cause of the problem. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/shibboleth/about/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/shibboleth/about/_index.md deleted file mode 100644 index fc8797e82ef..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/shibboleth/about/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Group Permissions with Shibboleth and OpenLDAP -weight: 1 ---- - -This page provides background information and context for Rancher users who intend to set up the Shibboleth authentication provider in Rancher. - -Because Shibboleth is a SAML provider, it does not support searching for groups. While a Shibboleth integration can validate user credentials, it can't be used to assign permissions to groups in Rancher without additional configuration. - -One solution to this problem is to configure an OpenLDAP identity provider. With an OpenLDAP back end for Shibboleth, you will be able to search for groups in Rancher and assign them to resources such as clusters, projects, or namespaces from the Rancher UI. - -### Terminology - -- **Shibboleth** is a single sign-on log-in system for computer networks and the Internet. It allows people to sign in using just one identity to various systems. 
It validates user credentials, but does not, on its own, handle group memberships. -- **SAML:** Security Assertion Markup Language, an open standard for exchanging authentication and authorization data between an identity provider and a service provider. -- **OpenLDAP:** a free, open-source implementation of the Lightweight Directory Access Protocol (LDAP). It is used to manage an organization’s computers and users. OpenLDAP is useful for Rancher users because it supports groups. In Rancher, it is possible to assign permissions to groups so that they can access resources such as clusters, projects, or namespaces, as long as the groups already exist in the identity provider. -- **IdP or IDP:** An identity provider. OpenLDAP is an example of an identity provider. - -### Adding OpenLDAP Group Permissions to Rancher Resources - -The diagram below illustrates how members of an OpenLDAP group can access resources in Rancher that the group has permissions for. - -For example, a cluster owner could add an OpenLDAP group to a cluster so that they have permissions to view most cluster level resources and create new projects. Then the OpenLDAP group members will have access to the cluster as soon as they log in to Rancher. - -In this scenario, OpenLDAP allows the cluster owner to search for groups when assigning permissions. Without OpenLDAP, the functionality to search for groups would not be supported. - -When a member of the OpenLDAP group logs in to Rancher, she is redirected to Shibboleth and enters her username and password. - -Shibboleth validates her credentials, and retrieves user attributes from OpenLDAP, including groups. Then Shibboleth sends a SAML assertion to Rancher including the user attributes. Rancher uses the group data so that she can access all of the resources and permissions that her groups have permissions for. 
- -![Adding OpenLDAP Group Permissions to Rancher Resources]({{}}/img/rancher/shibboleth-with-openldap-groups.svg) - \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/access-control/authentication/user-groups/_index.md b/content/rancher/v2.5/en/ecm/access-control/authentication/user-groups/_index.md deleted file mode 100644 index 8b3fd232551..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/authentication/user-groups/_index.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Users and Groups -weight: 1 ---- - -Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When you configure an external authentication provider, users from that provider will be able to log in to your Rancher server. When a user logs in, the authentication provider will supply your Rancher server with a list of groups to which the user belongs. - -Access to clusters, projects, multi-cluster apps, and global DNS providers and entries can be controlled by adding either individual users or groups to these resources. When you add a group to a resource, all users who are members of that group in the authentication provider, will be able to access the resource with the permissions that you've specified for the group. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.x/en/admin-settings/rbac/). - -## Managing Members - -When adding a user or group to a resource, you can search for users or groups by beginning to type their name. The Rancher server will query the authentication provider to find users and groups that match what you've entered. Searching is limited to the authentication provider that you are currently logged in with. For example, if you've enabled GitHub authentication but are logged in using a [local]({{}}/rancher/v2.x/en/admin-settings/authentication/local/) user account, you will not be able to search for GitHub users or groups. 
- -All users, whether they are local users or from an authentication provider, can be viewed and managed. From the **Global** view, click on **Users**. - -{{< saml_caveats >}} - -## User Information - -Rancher maintains information about each user that logs in through an authentication provider. This information includes whether the user is allowed to access your Rancher server and the list of groups that the user belongs to. Rancher keeps this user information so that the CLI, API, and kubectl can accurately reflect the access that the user has based on their group membership in the authentication provider. - -Whenever a user logs in to the UI using an authentication provider, Rancher automatically updates this user information. - -### Automatically Refreshing User Information - -Rancher will periodically refresh the user information even before a user logs in through the UI. You can control how often Rancher performs this refresh. From the **Global** view, click on **Settings**. Two settings control this behavior: - -- **`auth-user-info-max-age-seconds`** - - This setting controls how old a user's information can be before Rancher refreshes it. If a user makes an API call (either directly or by using the Rancher CLI or kubectl) and the time since the user's last refresh is greater than this setting, then Rancher will trigger a refresh. This setting defaults to `3600` seconds, i.e. 1 hour. - -- **`auth-user-info-resync-cron`** - - This setting controls a recurring schedule for resyncing authentication provider information for all users. Regardless of whether a user has logged in or used the API recently, this will cause the user to be refreshed at the specified interval. This setting defaults to `0 0 * * *`, i.e. once a day at midnight. See the [Cron documentation](https://en.wikipedia.org/wiki/Cron) for more information on valid values for this setting. 
- - -> **Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support periodically refreshing user information. User information will only be refreshed when the user logs into the Rancher UI. - -### Manually Refreshing User Information - -If you are not sure the last time Rancher performed an automatic refresh of user information, you can perform a manual refresh of all users. - -1. From the **Global** view, click on **Users** in the navigation bar. - -1. Click on **Refresh Group Memberships**. - -**Results:** Rancher refreshes the user information for all users. Requesting this refresh will update which users can access Rancher as well as all the groups that each user belongs to. - ->**Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support the ability to manually refresh user information. User information will only be refreshed when the user logs into the Rancher UI. - - -## Session Length - -The default length (TTL) of each user session is adjustable. The default session length is 16 hours. - -1. From the **Global** view, click on **Settings**. -1. In the **Settings** page, find **`auth-user-session-ttl-minutes`** and click **Edit.** -1. Enter the amount of time in minutes a session length should last and click **Save.** - -**Result:** Users are automatically logged out of Rancher after the set number of minutes. diff --git a/content/rancher/v2.5/en/ecm/access-control/rbac/_index.md b/content/rancher/v2.5/en/ecm/access-control/rbac/_index.md deleted file mode 100644 index 610aed2ab4a..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/rbac/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Role-Based Access Control (RBAC) -weight: 2 ---- - -> This section is under construction. - -Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. 
As mentioned in [Authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/), users can either be local or external. - -After you configure external authentication, the users that display on the **Users** page changes. - -- If you are logged in as a local user, only local users display. - -- If you are logged in as an external user, both external and local users display. - -## Users and Roles - -Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by _global permissions_, and _cluster and project roles_. - -- [Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/): - - Define user authorization outside the scope of any particular cluster. - -- [Cluster and Project Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/): - - Define user authorization inside the specific cluster or project where they are assigned the role. - -Both global permissions and cluster and project roles are implemented on top of [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). Therefore, enforcement of permissions and roles is performed by Kubernetes. diff --git a/content/rancher/v2.5/en/ecm/access-control/rbac/add-users-to-cluster/_index.md b/content/rancher/v2.5/en/ecm/access-control/rbac/add-users-to-cluster/_index.md deleted file mode 100644 index 60ca968ebdd..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/rbac/add-users-to-cluster/_index.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Adding Users to a Cluster -weight: 1 ---- - -> This section is under construction. - -If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a cluster, assign the user a cluster membership. - ->**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members/) instead. 
- -There are two contexts where you can add cluster members: - -- Adding Members to a New Cluster - - You can add members to a cluster as you create it (recommended if possible). - -- [Adding Members to an Existing Cluster](#editing-cluster-membership) - - You can always add members to a cluster after a cluster is provisioned. - -## Editing Cluster Membership - -Cluster administrators can edit the membership for a cluster, controlling which Rancher users can access the cluster and what features they can use. - -1. From the **Global** view, open the cluster that you want to add members to. - -2. From the main menu, select **Members**. Then click **Add Member**. - -3. Search for the user or group that you want to add to the cluster. - - If external authentication is configured: - - - Rancher returns users from your [external authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/) source as you type. - - >**Using AD but can't find your users?** - >There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/). - - - A drop-down allows you to add groups instead of individual users. The drop-down only lists groups that you, the logged in user, are part of. - - >**Note:** If you are logged in as a local user, external users do not display in your search results. For more information, see [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -4. Assign the user or group **Cluster** roles. - - [What are Cluster Roles?]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) - - >**Tip:** For Custom Roles, you can modify the list of individual roles available for assignment. - > - > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). 
- > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/locked-roles). - -**Result:** The chosen users are added to the cluster. - -- To revoke cluster membership, select the user and click **Delete**. This action deletes membership, not the user. -- To modify a user's roles in the cluster, delete them from the cluster, and then re-add them with modified roles. diff --git a/content/rancher/v2.5/en/ecm/access-control/rbac/default-custom-roles/_index.md b/content/rancher/v2.5/en/ecm/access-control/rbac/default-custom-roles/_index.md deleted file mode 100644 index 73d87c95a55..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/rbac/default-custom-roles/_index.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -title: Custom Roles -weight: 2 ---- - -Within Rancher, _roles_ determine what actions a user can make within a cluster or project. - -Note that _roles_ are different from _permissions_, which determine what clusters and projects you can access. - -This section covers the following topics: - -- [Prerequisites](#prerequisites) -- [Creating a custom role for a cluster or project](#creating-a-custom-role-for-a-cluster-or-project) -- [Creating a custom global role](#creating-a-custom-global-role) -- [Deleting a custom global role](#deleting-a-custom-global-role) -- [Assigning a custom global role to a group](#assigning-a-custom-global-role-to-a-group) - -## Prerequisites - -To complete the tasks on this page, one of the following permissions are required: - - - [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/). - - [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. 
- -## Creating A Custom Role for a Cluster or Project - -While Rancher comes out-of-the-box with a set of default user roles, you can also create default custom roles to provide users with very specific permissions within Rancher. - -The steps to add custom roles differ depending on the version of Rancher. - -{{% tabs %}} -{{% tab "Rancher v2.0.7+" %}} - -1. From the **Global** view, select **Security > Roles** from the main menu. - -1. Select a tab to determine the scope of the roles you're adding. The tabs are: - - - **Cluster:** The role is valid for assignment when adding/managing members to _only_ clusters. - - **Project:** The role is valid for assignment when adding/managing members to _only_ projects. - -1. Click **Add Cluster/Project Role.** - -1. **Name** the role. - -1. Optional: Choose the **Cluster/Project Creator Default** option to assign this role to a user when they create a new cluster or project. Using this feature, you can expand or restrict the default roles for cluster/project creators. - - > Out of the box, the Cluster Creator Default and the Project Creator Default roles are `Cluster Owner` and `Project Owner` respectively. - -1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. - - > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - - You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. - -1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. - -1. 
Click **Create**. - -{{% /tab %}} -{{% tab "Rancher prior to v2.0.7" %}} - -1. From the **Global** view, select **Security > Roles** from the main menu. - -1. Click **Add Role**. - -1. **Name** the role. - -1. Choose whether to set the role to a status of [locked]({{}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). - - > **Note:** Locked roles cannot be assigned to users. - -1. In the **Context** dropdown menu, choose the scope of the role assigned to the user. The contexts are: - - - **All:** The user can use their assigned role regardless of context. This role is valid for assignment when adding/managing members to clusters or projects. - - - **Cluster:** This role is valid for assignment when adding/managing members to _only_ clusters. - - - **Project:** This role is valid for assignment when adding/managing members to _only_ projects. - -1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. - - > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - - You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. - -1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. - -1. Click **Create**. 
- -{{% /tab %}} -{{% /tabs %}} - -## Creating a Custom Global Role - -### Creating a Custom Global Role that Copies Rules from an Existing Role - -If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role in which all of the rules from another role, such as the administrator role, are copied into a new role. This allows you to only configure the variations between the existing role and the new role. - -The custom global role can then be assigned to a user or group so that the custom global role takes effect the first time the user or users sign into Rancher. - -To create a custom global role based on an existing role, - -1. Go to the **Global** view and click **Security > Roles.** -1. On the **Global** tab, go to the role that the custom global role will be based on. Click **⋮ (…) > Clone.** -1. Enter a name for the role. -1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** -1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. -1. Click **Save.** - -### Creating a Custom Global Role that Does Not Copy Rules from Another Role - -Custom global roles don't have to be based on existing roles. To create a custom global role by choosing the specific Kubernetes resource operations that should be allowed for the role, follow these steps: - -1. Go to the **Global** view and click **Security > Roles.** -1. On the **Global** tab, click **Add Global Role.** -1. Enter a name for the role. -1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** -1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. -1. 
Click **Save.** - -## Deleting a Custom Global Role - -When deleting a custom global role, all global role bindings with this custom role are deleted. - -If a user is only assigned one custom global role, and the role is deleted, the user would lose access to Rancher. For the user to regain access, an administrator would need to edit the user and apply new global permissions. - -Custom global roles can be deleted, but built-in roles cannot be deleted. - -To delete a custom global role, - -1. Go to the **Global** view and click **Security > Roles.** -2. On the **Global** tab, go to the custom global role that should be deleted and click **⋮ (…) > Delete.** -3. Click **Delete.** - -## Assigning a Custom Global Role to a Group - -If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role. When the role is assigned to a group, the users in the group have the appropriate level of access the first time they sign into Rancher. - -When a user in the group logs in, they get the built-in Standard User global role by default. They will also get the permissions assigned to their groups. - -If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have their individual Standard User role. - -> **Prerequisites:** You can only assign a global role to a group if: -> -> * You have set up an [external authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-vs-local-authentication) -> * The external authentication provider supports [user groups]({{}}/rancher/v2.x/en/admin-settings/authentication/user-groups/) -> * You have already set up at least one user group with the authentication provider - -To assign a custom global role to a group, follow these steps: - -1. From the **Global** view, go to **Security > Groups.** -1. Click **Assign Global Role.** -1. 
In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. -1. In the **Custom** section, choose any custom global role that will be assigned to the group. -1. Optional: In the **Global Permissions** or **Built-in** sections, select any additional permissions that the group should have. -1. Click **Create.** - -**Result:** The custom global role will take effect when the users in the group log into Rancher. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/access-control/rbac/locked-roles/_index.md b/content/rancher/v2.5/en/ecm/access-control/rbac/locked-roles/_index.md deleted file mode 100644 index 616982432a5..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/rbac/locked-roles/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Locked Roles -weight: 3 ---- - -You can set roles to a status of `locked`. Locking roles prevents them from being assigned to users in the future. - -Locked roles: - -- Cannot be assigned to users that don't already have it assigned. -- Are not listed in the **Member Roles** drop-down when you are adding a user to a cluster or project. -- Do not affect users assigned the role before you lock the role. These users retain access that the role provides. - - **Example:** let's say your organization creates an internal policy that users assigned to a cluster are prohibited from creating new projects. It's your job to enforce this policy. - - To enforce it, before you add new users to the cluster, you should lock the following roles: `Cluster Owner`, `Cluster Member`, and `Create Projects`. Then you could create a new custom role that includes the same permissions as a __Cluster Member__, except the ability to create projects. Then, you use this new custom role when adding users to a cluster. - -Roles can be locked by the following users: - -- Any user assigned the `Administrator` global permission. 
-- Any user assigned the `Custom Users` permission, along with the `Manage Roles` role. - - -## Locking/Unlocking Roles - -If you want to prevent a role from being assigned to users, you can set it to a status of `locked`. - -You can lock roles in two contexts: - -- When you're [adding a custom role]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). -- When you're editing an existing role (see below). - -1. From the **Global** view, select **Security** > **Roles**. - -2. From the role that you want to lock (or unlock), select **⋮** > **Edit**. - -3. From the **Locked** option, choose the **Yes** or **No** radio button. Then click **Save**. diff --git a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/_index.md b/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/_index.md deleted file mode 100644 index bcbb52e72eb..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Access Control for the Enterprise Cluster Manager and Projects -shortTitle: Enterprise Cluster Manager -weight: 4 ---- - -> This section is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/ace/_index.md b/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/ace/_index.md deleted file mode 100644 index 3a1c87250e4..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/ace/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: How the Authorized Cluster Endpoint Works -weight: 7 ---- - -This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. 
It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](../kubectl/#authenticating-directly-with-a-downstream-cluster) - -### About the kubeconfig File - -The _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). - -This kubeconfig file and its contents are specific to the cluster you are viewing. It can be downloaded from the cluster view in Rancher. You will need a separate kubeconfig file for each cluster that you have access to in Rancher. - -After you download the kubeconfig file, you will be able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster. - -### Two Authentication Methods for RKE Clusters - -If the cluster is not an [RKE cluster,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) the kubeconfig file allows you to access the cluster in only one way: it lets you be authenticated with the Rancher server, then Rancher allows you to run kubectl commands on the cluster. - -For RKE clusters, the kubeconfig file allows you to be authenticated in two ways: - -- **Through the Rancher server authentication proxy:** Rancher's authentication proxy validates your identity, then connects you to the downstream cluster that you want to access. -- **Directly with the downstream cluster's API server:** RKE clusters have an authorized cluster endpoint enabled by default. This endpoint allows you to access your downstream Kubernetes cluster with the kubectl CLI and a kubeconfig file, and it is enabled by default for RKE clusters. In this scenario, the downstream cluster's Kubernetes API server authenticates you by calling a webhook (the `kube-api-auth` microservice) that Rancher set up. 
- -This second method, the capability to connect directly to the cluster's Kubernetes API server, is important because it lets you access your downstream cluster if you can't connect to Rancher. - -To use the authorized cluster endpoint, you will need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](../kubectl/#authenticating-directly-with-a-downstream-cluster) - -These methods of communicating with downstream Kubernetes clusters are also explained in the [architecture page]({{}}/rancher/v2.x/en/overview/architecture/#communicating-with-downstream-user-clusters) in the larger context of explaining how Rancher works and how Rancher communicates with downstream clusters. - -### About the kube-api-auth Authentication Webhook - -The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the [authorized cluster endpoint,]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) which is only available for [RKE clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. - -During cluster provisioning, the file `/etc/kubernetes/kube-api-authn-webhook.yaml` is deployed and `kube-apiserver` is configured with `--authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml`. This configures the `kube-apiserver` to query `http://127.0.0.1:6440/v1/authenticate` to determine authentication for bearer tokens. 
- -The scheduling rules for `kube-api-auth` are listed below: - -_Applies to v2.3.0 and higher_ - -| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | -| -------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | -| kube-api-auth | `beta.kubernetes.io/os:NotIn:windows`
`node-role.kubernetes.io/controlplane:In:"true"` | none | `operator:Exists` | diff --git a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/cluster-project-roles/_index.md b/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/cluster-project-roles/_index.md deleted file mode 100644 index 4260529341f..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/cluster-project-roles/_index.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Cluster and Project Roles -weight: 5 --- - -Cluster and project roles define user authorization inside a cluster or project. You can manage these roles from the **Global > Security > Roles** page. - -### Membership and Role Assignment - -The projects and clusters accessible to non-administrative users are determined by _membership_. Membership is a list of users who have access to a specific cluster or project based on the roles they were assigned in that cluster or project. Each cluster and project includes a tab that a user with the appropriate permissions can use to manage membership. - -When you create a cluster or project, Rancher automatically assigns you as the `Owner` for it. Users assigned the `Owner` role can assign other users roles in the cluster or project. - -> **Note:** Non-administrative users cannot access any existing projects/clusters by default. A user with appropriate permissions (typically the owner) must explicitly assign the project and cluster membership. - -### Cluster Roles - -_Cluster roles_ are roles that you can assign to users, granting them access to a cluster. There are two primary cluster roles: `Owner` and `Member`. - -- **Cluster Owner:** - - These users have full control over the cluster and all resources in it. - -- **Cluster Member:** - - These users can view most cluster level resources and create new projects. - -#### Custom Cluster Roles - -Rancher lets you assign _custom cluster roles_ to a standard user instead of the typical `Owner` or `Member` roles. 
These roles can be either a built-in custom cluster role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a cluster. See the table below for a list of built-in custom cluster roles. - -#### Cluster Role Reference - -The following table lists each built-in custom cluster role available and whether that level of access is included in the default cluster-level permissions, `Cluster Owner` and `Cluster Member`. - -| Built-in Cluster Role | Owner | Member | -| ---------------------------------- | ------------- | --------------------------------- | -| Create Projects | ✓ | ✓ | -| Manage Cluster Backups             | ✓ | | -| Manage Cluster Catalogs | ✓ | | -| Manage Cluster Members | ✓ | | -| Manage Nodes | ✓ | | -| Manage Storage | ✓ | | -| View All Projects | ✓ | | -| View Cluster Catalogs | ✓ | ✓ | -| View Cluster Members | ✓ | ✓ | -| View Nodes | ✓ | ✓ | - -For details on how each cluster role can access Kubernetes resources, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Clusters** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. - -> **Note:** ->When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - -### Giving a Custom Cluster Role to a Cluster Member - -After an administrator [sets up a custom cluster role,]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/#adding-a-custom-role) cluster owners and admins can then assign those roles to cluster members. - -To assign a custom role to a new cluster member, you can use the Rancher UI. 
To modify the permissions of an existing member, you will need to use the Rancher API view. - -To assign the role to a new cluster member, - -1. Go to the **Cluster** view, then go to the **Members** tab. -1. Click **Add Member.** Then in the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. -1. Click **Create.** - -**Result:** The member has the assigned role. - -To assign any custom role to an existing cluster member, - -1. Go to the member you want to give the role to. Click the **⋮ > View in API.** -1. In the **roleTemplateId** field, go to the drop-down menu and choose the role you want to assign to the member. Click **Show Request** and **Send Request.** - -**Result:** The member has the assigned role. - -### Project Roles - -_Project roles_ are roles that can be used to grant users access to a project. There are three primary project roles: `Owner`, `Member`, and `Read Only`. - -- **Project Owner:** - - These users have full control over the project and all resources in it. - -- **Project Member:** - - These users can manage project-scoped resources like namespaces and workloads, but cannot manage other project members. - -- **Read Only:** - - These users can view everything in the project but cannot create, update, or delete anything. - - >**Caveat:** - > - >Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `owner` or `member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. - - -#### Custom Project Roles - -Rancher lets you assign _custom project roles_ to a standard user instead of the typical `Owner`, `Member`, or `Read Only` roles. 
These roles can be either a built-in custom project role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a project. See the table below for a list of built-in custom project roles. - -#### Project Role Reference - -The following table lists each built-in custom project role available in Rancher and whether it is also granted by the `Owner`, `Member`, or `Read Only` role. - -| Built-in Project Role | Owner | Member | Read Only | -| ---------------------------------- | ------------- | ----------------------------- | ------------- | -| Manage Project Members | ✓ | | | -| Create Namespaces | ✓ | ✓ | | -| Manage Config Maps | ✓ | ✓ | | -| Manage Ingress | ✓ | ✓ | | -| Manage Project Catalogs | ✓ | | | -| Manage Secrets | ✓ | ✓ | | -| Manage Service Accounts | ✓ | ✓ | | -| Manage Services | ✓ | ✓ | | -| Manage Volumes | ✓ | ✓ | | -| Manage Workloads | ✓ | ✓ | | -| View Config Maps | ✓ | ✓ | ✓ | -| View Ingress | ✓ | ✓ | ✓ | -| View Project Members | ✓ | ✓ | ✓ | -| View Project Catalogs | ✓ | ✓ | ✓ | -| View Secrets | ✓ | ✓ | ✓ | -| View Service Accounts | ✓ | ✓ | ✓ | -| View Services | ✓ | ✓ | ✓ | -| View Volumes | ✓ | ✓ | ✓ | -| View Workloads | ✓ | ✓ | ✓ | - -> **Notes:** -> ->- Each project role listed above, including `Owner`, `Member`, and `Read Only`, is comprised of multiple rules granting access to various resources. You can view the roles and their rules on the Global > Security > Roles page. ->- When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. 
->- The `Manage Project Members` role allows the project owner to manage any members of the project **and** grant them any project scoped role regardless of their access to the project resources. Be cautious when assigning this role out individually. - -### Defining Custom Roles -As previously mentioned, custom roles can be defined for use at the cluster or project level. The context field defines whether the role will appear on the cluster member page, project member page, or both. - -When defining a custom role, you can grant access to specific resources or specify roles from which the custom role should inherit. A custom role can be made up of a combination of specific grants and inherited roles. All grants are additive. This means that defining a narrower grant for a specific resource **will not** override a broader grant defined in a role that the custom role is inheriting from. - -### Default Cluster and Project Roles - -By default, when a standard user creates a new cluster or project, they are automatically assigned an ownership role: either [cluster owner](#cluster-roles) or [project owner](#project-roles). However, in some organizations, these roles may overextend administrative access. In this use case, you can change the default role to something more restrictive, such as a set of individual roles or a custom role. - -There are two methods for changing default cluster/project roles: - -- **Assign Custom Roles**: Create a [custom role]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. - -- **Assign Individual Roles**: Configure multiple [cluster](#cluster-role-reference)/[project](#project-role-reference) roles as default for assignment to the creating user. 
- - For example, instead of assigning a role that inherits other roles (such as `cluster owner`), you can choose a mix of individual roles (such as `manage nodes` and `manage storage`). - ->**Note:** -> ->- Although you can [lock]({{}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/) a default role, the system still assigns the role to users who create a cluster/project. ->- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. - -### Configuring Default Roles for Cluster and Project Creators - -You can change the cluster or project role(s) that are automatically assigned to the creating user. - -1. From the **Global** view, select **Security > Roles** from the main menu. Select either the **Cluster** or **Project** tab. - -1. Find the custom or individual role that you want to use as default. Then edit the role by selecting **⋮ > Edit**. - -1. Enable the role as default. -{{% accordion id="cluster" label="For Clusters" %}} -1. From **Cluster Creator Default**, choose **Yes: Default role for new cluster creation**. -1. Click **Save**. -{{% /accordion %}} -{{% accordion id="project" label="For Projects" %}} -1. From **Project Creator Default**, choose **Yes: Default role for new project creation**. -1. Click **Save**. -{{% /accordion %}} - -1. If you want to remove a default role, edit the permission and select **No** from the default roles option. - -**Result:** The default roles are configured based on your changes. Roles assigned to cluster/project creators display a check in the **Cluster/Project Creator Default** column. - -### Cluster Membership Revocation Behavior - -When you revoke the cluster membership for a standard user that's explicitly assigned membership to both the cluster _and_ a project within the cluster, that standard user [loses their cluster roles](#clus-roles) but [retains their project roles](#proj-roles). 
In other words, although you have revoked the user's permissions to access the cluster and its nodes, the standard user can still: - -- Access the projects they hold membership in. -- Exercise any [individual project roles](#project-role-reference) they are assigned. - -If you want to completely revoke a user's access within a cluster, revoke both their cluster and project memberships. diff --git a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/custom-global-roles/_index.md b/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/custom-global-roles/_index.md deleted file mode 100644 index 150011ee05e..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/custom-global-roles/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Custom Global Roles -weight: 3 ---- - -This page is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/global-permissions/_index.md b/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/global-permissions/_index.md deleted file mode 100644 index 0486688a0af..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/global-permissions/_index.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: Global Permissions -weight: 4 ---- - -_Permissions_ are individual access rights that you can assign when selecting a custom permission for a user. - -Global Permissions define user authorization outside the scope of any particular cluster. Out-of-the-box, there are three default global permissions: `Administrator`, `Standard User` and `User-base`. - -- **Administrator:** These users have full control over the entire Rancher system and all clusters within it. - -- **Standard User:** These users can create new clusters and use them. Standard users can also assign other users permissions to their clusters. - -- **User-Base:** User-Base users have login-access only. - -You cannot update or delete the built-in Global Permissions. 
- -This section covers the following topics: - -- [Global permission assignment](#global-permission-assignment) - - [Global permissions for new local users](#global-permissions-for-new-local-users) - - [Global permissions for users with external authentication](#global-permissions-for-users-with-external-authentication) -- [Custom global permissions](#custom-global-permissions) - - [Custom global permissions reference](#custom-global-permissions-reference) - - [Configuring default global permissions for new users](#configuring-default-global-permissions) - - [Configuring global permissions for existing individual users](#configuring-global-permissions-for-existing-individual-users) - - [Configuring global permissions for groups](#configuring-global-permissions-for-groups) - - [Refreshing group memberships](#refreshing-group-memberships) - -# Global Permission Assignment - -Global permissions for local users are assigned differently than users who log in to Rancher using external authentication. - -### Global Permissions for New Local Users - -When you create a new local user, you assign them a global permission as you complete the **Add User** form. - -To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column. You can [change the default global permissions to meet your needs.](#configuring-default-global-permissions) - -### Global Permissions for Users with External Authentication - -When a user logs into Rancher using an external authentication provider for the first time, they are automatically assigned the **New User Default** global permissions. By default, Rancher assigns the **Standard User** permission for new users. 
- -To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column, and you can [change them to meet your needs.](#configuring-default-global-permissions) - -Permissions can be assigned to an individual user with [these steps.](#configuring-global-permissions-for-existing-individual-users) - -As of Rancher v2.4.0, you can [assign a role to everyone in the group at the same time](#configuring-global-permissions-for-groups) if the external authentication provider supports groups. - -# Custom Global Permissions - -Using custom permissions is convenient for providing users with narrow or specialized access to Rancher. - -When a user from an [external authentication source]({{}}/rancher/v2.x/en/admin-settings/authentication/) signs into Rancher for the first time, they're automatically assigned a set of global permissions (hereafter, permissions). By default, after a user logs in for the first time, they are created as a user and assigned the default `user` permission. The standard `user` permission allows users to login and create clusters. - -However, in some organizations, these permissions may extend too much access. Rather than assigning users the default global permissions of `Administrator` or `Standard User`, you can assign them a more restrictive set of custom global permissions. - -The default roles, Administrator and Standard User, each come with multiple global permissions built into them. The Administrator role includes all global permissions, while the default user role includes three global permissions: Create Clusters, Use Catalog Templates, and User Base, which is equivalent to the minimum permission to log in to Rancher. 
In other words, the custom global permissions are modularized so that if you want to change the default user role permissions, you can choose which subset of global permissions are included in the new default user role. - -Administrators can enforce custom global permissions in multiple ways: - -- [Changing the default permissions for new users](#configuring-default-global-permissions) -- [Editing the permissions of an existing user](#configuring-global-permissions-for-individual-users) -- [Assigning a custom global permission to a group](#assigning-a-custom-global-permission-to-a-group) - -### Custom Global Permissions Reference - -The following table lists each custom global permission available and whether it is included in the default global permissions, `Administrator`, `Standard User` and `User-Base`. - -| Custom Global Permission | Administrator | Standard User | User-Base | -| ---------------------------------- | ------------- | ------------- |-----------| -| Create Clusters | ✓ | ✓ | | -| Create RKE Templates | ✓ | ✓ | | -| Manage Authentication | ✓ | | | -| Manage Catalogs | ✓ | | | -| Manage Cluster Drivers | ✓ | | | -| Manage Node Drivers | ✓ | | | -| Manage PodSecurityPolicy Templates | ✓ | | | -| Manage Roles | ✓ | | | -| Manage Settings | ✓ | | | -| Manage Users | ✓ | | | -| Use Catalog Templates | ✓ | ✓ | | -| User Base\* (Basic log-in access) | ✓ | ✓ | | - -> \*This role has two names: -> -> - When you go to the Users tab and edit a user's global role, this role is called Login Access in the custom global permissions list. -> - When you go to the Security tab and edit the roles from the roles page, this role is called User Base. - -For details on which Kubernetes resources correspond to each global permission, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Global** tab. 
If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. - -> **Notes:** -> -> - Each permission listed above is comprised of multiple individual permissions not listed in the Rancher UI. For a full list of these permissions and the rules they are comprised of, access through the API at `/v3/globalRoles`. -> - When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - -### Configuring Default Global Permissions - -If you want to restrict the default permissions for new users, you can remove the `user` permission as default role and then assign multiple individual permissions as default instead. Conversely, you can also add administrative permissions on top of a set of other standard permissions. - -> **Note:** Default roles are only assigned to users added from an external authentication provider. For local users, you must explicitly assign global permissions when adding a user to Rancher. You can customize these global permissions when adding the user. - -To change the default global permissions that are assigned to external users upon their first log in, follow these steps: - -1. From the **Global** view, select **Security > Roles** from the main menu. Make sure the **Global** tab is selected. - -1. Find the permissions set that you want to add or remove as a default. Then edit the permission by selecting **⋮ > Edit**. - -1. If you want to add the permission as a default, Select **Yes: Default role for new users** and then click **Save**. - -1. If you want to remove a default permission, edit the permission and select **No** from **New User Default**. 
- -**Result:** The default global permissions are configured based on your changes. Permissions assigned to new users display a check in the **New User Default** column. - -### Configuring Global Permissions for Existing Individual Users - -To configure permission for a user, - -1. Go to the **Users** tab. - -1. On this page, go to the user whose access level you want to change and click **⋮ > Edit.** - -1. In the **Global Permissions** section, click **Custom.** - -1. Check the boxes for each subset of permissions you want the user to have access to. - -1. Click **Save.** - -> **Result:** The user's global permissions have been updated. - -### Configuring Global Permissions for Groups - -If you have a group of individuals that need the same level of access in Rancher, it can save time to assign permissions to the entire group at once, so that the users in the group have the appropriate level of access the first time they sign into Rancher. - -After you assign a custom global role to a group, the custom global role will be assigned to a user in the group when they log in to Rancher. - -For existing users, the new permissions will take effect when the users log out of Rancher and back in again, or when an administrator [refreshes the group memberships.](#refreshing-group-memberships) - -For new users, the new permissions take effect when the users log in to Rancher for the first time. New users from this group will receive the permissions from the custom global role in addition to the **New User Default** global permissions. By default, the **New User Default** permissions are equivalent to the **Standard User** global role, but the default permissions can be [configured.](#configuring-default-global-permissions) - -If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. 
They would continue to have any remaining roles that were assigned to them, which would typically include the roles marked as **New User Default.** Rancher will remove the permissions that are associated with the group when the user logs out, or when an administrator [refreshes group memberships,]((#refreshing-group-memberships)) whichever comes first. - -> **Prerequisites:** You can only assign a global role to a group if: -> -> * You have set up an [external authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-vs-local-authentication) -> * The external authentication provider supports [user groups]({{}}/rancher/v2.x/en/admin-settings/authentication/user-groups/) -> * You have already set up at least one user group with the authentication provider - -To assign a custom global role to a group, follow these steps: - -1. From the **Global** view, go to **Security > Groups.** -1. Click **Assign Global Role.** -1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. -1. In the **Global Permissions,** **Custom,** and/or **Built-in** sections, select the permissions that the group should have. -1. Click **Create.** - -**Result:** The custom global role will take effect when the users in the group log into Rancher. - -### Refreshing Group Memberships - -When an administrator updates the global permissions for a group, the changes take effect for individual group members after they log out of Rancher and log in again. - -To make the changes take effect immediately, an administrator or cluster owner can refresh group memberships. - -An administrator might also want to refresh group memberships if a user is removed from a group in the external authentication service. In that case, the refresh makes Rancher aware that the user was removed from the group. - -To refresh group memberships, - -1. From the **Global** view, click **Security > Users.** -1. 
Click **Refresh Group Memberships.** - -**Result:** Any changes to the group members' permissions will take effect. diff --git a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/kubectl/_index.md b/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/kubectl/_index.md deleted file mode 100644 index d47673f71f4..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/kubectl/_index.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: "Access a Cluster with Kubectl and kubeconfig" -description: "Learn how you can access and manage your Kubernetes clusters using kubectl with kubectl Shell or with kubectl CLI and kubeconfig file. A kubeconfig file is used to configure access to Kubernetes. When you create a cluster with Rancher, it automatically creates a kubeconfig for your cluster." -weight: 6 ---- - -This section describes how to manipulate your downstream Kubernetes cluster with kubectl from the Rancher UI or from your workstation. - -For more information on using kubectl, see [Kubernetes Documentation: Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/). - -- [Accessing clusters with kubectl shell in the Rancher UI](#accessing-clusters-with-kubectl-shell-in-the-rancher-ui) -- [Accessing clusters with kubectl from your workstation](#accessing-clusters-with-kubectl-from-your-workstation) -- [Note on Resources created using kubectl](#note-on-resources-created-using-kubectl) -- [Authenticating Directly with a Downstream Cluster](#authenticating-directly-with-a-downstream-cluster) - - [Connecting Directly to Clusters with FQDN Defined](#connecting-directly-to-clusters-with-fqdn-defined) - - [Connecting Directly to Clusters without FQDN Defined](#connecting-directly-to-clusters-without-fqdn-defined) - - -### Accessing Clusters with kubectl Shell in the Rancher UI - -You can access and manage your clusters by logging into Rancher and opening the kubectl shell in the UI. No further configuration necessary. - -1. 
From the **Global** view, open the cluster that you want to access with kubectl. - -2. Click **Launch kubectl**. Use the window that opens to interact with your Kubernetes cluster. - -### Accessing Clusters with kubectl from Your Workstation - -This section describes how to download your cluster's kubeconfig file, launch kubectl from your workstation, and access your downstream cluster. - -This alternative method of accessing the cluster allows you to authenticate with Rancher and manage your cluster without using the Rancher UI. - -> **Prerequisites:** These instructions assume that you have already created a Kubernetes cluster, and that kubectl is installed on your workstation. For help installing kubectl, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - -1. Log into Rancher. From the **Global** view, open the cluster that you want to access with kubectl. -1. Click **Kubeconfig File**. -1. Copy the contents displayed to your clipboard. -1. Paste the contents into a new file on your local computer. Move the file to `~/.kube/config`. Note: The default location that kubectl uses for the kubeconfig file is `~/.kube/config`, but you can use any directory and specify it using the `--kubeconfig` flag, as in this command: - ``` - kubectl --kubeconfig /custom/path/kube.config get pods - ``` -1. From your workstation, launch kubectl. Use it to interact with your kubernetes cluster. - - -### Note on Resources Created Using kubectl - -Rancher will discover and show resources created by `kubectl`. However, these resources might not have all the necessary annotations on discovery. If an operation (for instance, scaling the workload) is done to the resource using the Rancher UI/API, this may trigger recreation of the resources due to the missing annotations. This should only happen the first time an operation is done to the discovered resource. 
- -# Authenticating Directly with a Downstream Cluster - -This section intended to help you set up an alternative method to access an [RKE cluster.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters) - -This method is only available for RKE clusters that have the [authorized cluster endpoint]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) enabled. When Rancher creates this RKE cluster, it generates a kubeconfig file that includes additional kubectl context(s) for accessing your cluster. This additional context allows you to use kubectl to authenticate with the downstream cluster without authenticating through Rancher. For a longer explanation of how the authorized cluster endpoint works, refer to [this page.](../ace) - -We recommend that as a best practice, you should set up this method to access your RKE cluster, so that just in case you can’t connect to Rancher, you can still access the cluster. - -> **Prerequisites:** The following steps assume that you have created a Kubernetes cluster and followed the steps to [connect to your cluster with kubectl from your workstation.](#accessing-clusters-with-kubectl-from-your-workstation) - -To find the name of the context(s) in your downloaded kubeconfig file, run: - -``` -kubectl config get-contexts --kubeconfig /custom/path/kube.config -CURRENT NAME CLUSTER AUTHINFO NAMESPACE -* my-cluster my-cluster user-46tmn - my-cluster-controlplane-1 my-cluster-controlplane-1 user-46tmn -``` - -In this example, when you use `kubectl` with the first context, `my-cluster`, you will be authenticated through the Rancher server. - -With the second context, `my-cluster-controlplane-1`, you would authenticate with the authorized cluster endpoint, communicating with an downstream RKE cluster directly. - -We recommend using a load balancer with the authorized cluster endpoint. 
For details, refer to the [recommended architecture section.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) - -Now that you have the name of the context needed to authenticate directly with the cluster, you can pass the name of the context in as an option when running kubectl commands. The commands will differ depending on whether your cluster has an FQDN defined. Examples are provided in the sections below. - -When `kubectl` works normally, it confirms that you can access your cluster while bypassing Rancher's authentication proxy. - -### Connecting Directly to Clusters with FQDN Defined - -If an FQDN is defined for the cluster, a single context referencing the FQDN will be created. The context will be named `-fqdn`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. - -Assuming the kubeconfig file is located at `~/.kube/config`: - -``` -kubectl --context -fqdn get nodes -``` -Directly referencing the location of the kubeconfig file: -``` -kubectl --kubeconfig /custom/path/kube.config --context -fqdn get pods -``` - -### Connecting Directly to Clusters without FQDN Defined - -If there is no FQDN defined for the cluster, extra contexts will be created referencing the IP address of each node in the control plane. Each context will be named `-`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. 
- -Assuming the kubeconfig file is located at `~/.kube/config`: -``` -kubectl --context - get nodes -``` -Directly referencing the location of the kubeconfig file: -``` -kubectl --kubeconfig /custom/path/kube.config --context - get pods -``` diff --git a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/project-members/_index.md b/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/project-members/_index.md deleted file mode 100644 index c58f5848c34..00000000000 --- a/content/rancher/v2.5/en/ecm/access-control/rbac/mcm/project-members/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Adding Users to Projects -weight: 2 ---- - -If you want to provide a user with access and permissions to _specific_ projects and resources within a cluster, assign the user a project membership. - -You can add members to a project as it is created, or add them to an existing project. - ->**Tip:** Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members]({{}}/rancher/v2.x/en/cluster-provisioning/cluster-members/) instead. - -### Adding Members to a New Project - -You can add members to a project as you create it (recommended if possible). For details on creating a new project, refer to the [cluster administration section.]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) - -### Adding Members to an Existing Project - -Following project creation, you can add users as project members so that they can access its resources. - -1. From the **Global** view, open the project that you want to add members to. - -2. From the main menu, select **Members**. Then click **Add Member**. - -3. Search for the user or group that you want to add to the project. - - If external authentication is configured: - - - Rancher returns users from your external authentication source as you type. - - - A drop-down allows you to add groups instead of individual users. The dropdown only lists groups that you, the logged in user, are included in. 
- - >**Note:** If you are logged in as a local user, external users do not display in your search results. - -1. Assign the user or group **Project** roles. - - [What are Project Roles?]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) - - >**Notes:** - > - >- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. - > - >- For `Custom` roles, you can modify the list of individual roles available for assignment. - > - > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles). - > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). - -**Result:** The chosen users are added to the project. - -- To revoke project membership, select the user and click **Delete**. This action deletes membership, not the user. -- To modify a user's roles in the project, delete them from the project, and then re-add them with modified roles. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/backing-up-a-cluster/_index.md b/content/rancher/v2.5/en/ecm/backing-up-a-cluster/_index.md deleted file mode 100644 index 343f8f6f841..00000000000 --- a/content/rancher/v2.5/en/ecm/backing-up-a-cluster/_index.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: Backing up a Cluster -weight: 7 ---- - -In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) can be easily performed. 
- -Rancher recommends configuring recurrent `etcd` snapshots for all production clusters. Additionally, one-time snapshots can easily be taken as well. - -Snapshots of the etcd database are taken and saved either [locally onto the etcd nodes](#local-backup-target) or to a [S3 compatible target](#s3-backup-target). The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. - -This section covers the following topics: - -- [How snapshots work](#how-snapshots-work) -- [Configuring recurring snapshots](#configuring-recurring-snapshots) -- [One-time snapshots](#one-time-snapshots) -- [Snapshot backup targets](#snapshot-backup-targets) - - [Local backup target](#local-backup-target) - - [S3 backup target](#s3-backup-target) - - [Using a custom CA certificate for S3](#using-a-custom-ca-certificate-for-s3) - - [IAM Support for storing snapshots in S3](#iam-support-for-storing-snapshots-in-s3) -- [Viewing available snapshots](#viewing-available-snapshots) -- [Safe timestamps](#safe-timestamps) -- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) - -# How Snapshots Work - -{{% tabs %}} -{{% tab "Rancher v2.4.0+" %}} -When Rancher creates a snapshot, it includes three components: - -- The cluster data in etcd -- The Kubernetes version -- The cluster configuration in the form of the `cluster.yml` - -Because the Kubernetes version is now included in the snapshot, it is possible to restore a cluster to a prior Kubernetes version. - -The multiple components of the snapshot allow you to select from the following options if you need to a cluster from a snapshot: - -- **Restore just the etcd contents:** This restoration is similar to restoring to snapshots in Rancher prior to v2.4.0. 
-- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. -- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. - -It's always recommended to take a new snapshot before any upgrades. -{{% /tab %}} -{{% tab "Rancher prior to v2.4.0" %}} -When Rancher creates a snapshot, only the etcd data is included in the snapshot. - -Because the Kubernetes version is not included in the snapshot, there is no option to restore a cluster to a different Kubernetes version. - -It's always recommended to take a new snapshot before any upgrades. -{{% /tab %}} -{{% /tabs %}} - -# Configuring Recurring Snapshots - -Select how often you want recurring snapshots to be taken as well as how many snapshots to keep. The amount of time is measured in hours. With timestamped snapshots, the user has the ability to do a point-in-time recovery. - -By default, [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) are configured to take recurring snapshots (saved to local disk). To protect against local disk failure, using the [S3 Target](#s3-backup-target) or replicating the path on disk is advised. - -During cluster provisioning or editing the cluster, the configuration for snapshots can be found in the advanced section for **Cluster Options**. Click on **Show advanced options**. - -In the **Advanced Cluster Options** section, there are several options available to configure: - -| Option | Description | Default Value| -| --- | ---| --- | -|[etcd Snapshot Backup Target](#snapshot-backup-targets)| Select where you want the snapshots to be saved. 
Options are either local or in S3 | local| -|Recurring etcd Snapshot Enabled| Enable/Disable recurring snapshots | Yes| -|[Recurring etcd Snapshot Creation Period](#snapshot-creation-period-and-retention-count) | Time in hours between recurring snapshots| 12 hours | -|[Recurring etcd Snapshot Retention Count](#snapshot-creation-period-and-retention-count)| Number of snapshots to retain| 6 | - -# One-Time Snapshots - -In addition to recurring snapshots, you may want to take a "one-time" snapshot. For example, before upgrading the Kubernetes version of a cluster it's best to backup the state of the cluster to protect against upgrade failure. - -1. In the **Global** view, navigate to the cluster that you want to take a one-time snapshot. - -2. Click the **⋮ > Snapshot Now**. - -**Result:** Based on your [snapshot backup target](#snapshot-backup-targets), a one-time snapshot will be taken and saved in the selected backup target. - -# Snapshot Backup Targets - -Rancher supports two different backup targets: - -* [Local Target](#local-backup-target) -* [S3 Target](#s3-backup-target) - -### Local Backup Target - -By default, the `local` backup target is selected. The benefits of this option is that there is no external configuration. Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. - -### S3 Backup Target - -The `S3` backup target allows users to configure a S3 compatible backend to store the snapshots. The primary benefit of this option is that if the cluster loses all the etcd nodes, the cluster can still be restored as the snapshots are stored externally. 
Rancher recommends external targets like `S3` backup; however, its configuration does require additional effort that should be considered.
- - To give an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) - -# Viewing Available Snapshots - -The list of all available snapshots for the cluster is available in the Rancher UI. - -1. In the **Global** view, navigate to the cluster that you want to view snapshots. - -2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. - -# Safe Timestamps - -As of v2.2.6, snapshot files are timestamped to simplify processing the files using external tools and scripts, but in some S3 compatible backends, these timestamps were unusable. As of Rancher v2.3.0, the option `safe_timestamp` is added to support compatible file names. When this flag is set to `true`, all special characters in the snapshot filename timestamp are replaced. - -This option is not available directly in the UI, and is only available through the `Edit as Yaml` interface. - -# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 - -If you have any Rancher launched Kubernetes clusters that were created prior to v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots prior to v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/). 
diff --git a/content/rancher/v2.5/en/ecm/best-practices/_index.md b/content/rancher/v2.5/en/ecm/best-practices/_index.md deleted file mode 100644 index 3c88c0ac9c6..00000000000 --- a/content/rancher/v2.5/en/ecm/best-practices/_index.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Best Practices Guide -weight: 3 ---- - -The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. - -If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. - -Use the navigation bar on the left to find the current best practices for managing and deploying the Rancher Server. - -For more guidance on best practices, you can consult these resources: - -- [Rancher Docs]({{}}) - - [Monitoring]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) - - [Backups and Disaster Recovery]({{}}/rancher/v2.x/en/backups/) - - [Security]({{}}/rancher/v2.x/en/security/) -- [Rancher Blog](https://rancher.com/blog/) - - [Articles about best practices on the Rancher blog](https://rancher.com/tags/best-practices/) - - [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) -- [Rancher Forum](https://forums.rancher.com/) -- [Rancher Users Slack](https://slack.rancher.io/) -- [Rancher Labs YouTube Channel - Online Meetups, Demos, Training, and Webinars](https://www.youtube.com/channel/UCh5Xtp82q8wjijP8npkVTBA/featured) diff --git a/content/rancher/v2.5/en/ecm/best-practices/mcm/_index.md b/content/rancher/v2.5/en/ecm/best-practices/mcm/_index.md deleted file mode 100644 index 569f3ed7602..00000000000 --- a/content/rancher/v2.5/en/ecm/best-practices/mcm/_index.md +++ /dev/null @@ -1,6 +0,0 @@ 
---- -title: Recommendations for the Enterprise Cluster Manager -weight: 2 ---- - -> This section is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/best-practices/mcm/architecture-recommendations/_index.md b/content/rancher/v2.5/en/ecm/best-practices/mcm/architecture-recommendations/_index.md deleted file mode 100644 index fd381be31e5..00000000000 --- a/content/rancher/v2.5/en/ecm/best-practices/mcm/architecture-recommendations/_index.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: Architecture Recommendations -weight: 3 ---- - -Kubernetes cluster. If you are installing Rancher on a single node, the main architecture recommendation that applies to your installation is that the node running Rancher should be [separate from downstream clusters.](#separation-of-rancher-and-user-clusters) - -This section covers the following topics: - -- [Separation of Rancher and User Clusters](#separation-of-rancher-and-user-clusters) -- [Why HA is Better for Rancher in Production](#why-ha-is-better-for-rancher-in-production) -- [Recommended Load Balancer Configuration for Kubernetes Installations](#recommended-load-balancer-configuration-for-ha-installations) -- [Environment for Kubernetes Installations](#environment-for-ha-installations) -- [Recommended Node Roles for Kubernetes Installations](#recommended-node-roles-for-ha-installations) -- [Architecture for an Authorized Cluster Endpoint](#architecture-for-an-authorized-cluster-endpoint) - -# Separation of Rancher and User Clusters - -A user cluster is a downstream Kubernetes cluster that runs your apps and services. - -If you have a Docker installation of Rancher, the node running the Rancher server should be separate from your downstream clusters. - -In Kubernetes installations of Rancher, the Rancher server cluster should also be separate from the user clusters. 
- -![Separation of Rancher Server from User Clusters]({{}}/img/rancher/rancher-architecture-separation-of-rancher-server.svg) - -# Why HA is Better for Rancher in Production - -We recommend installing the Rancher server on a high-availability Kubernetes cluster, primarily because it protects the Rancher server data. In a high-availability installation, a load balancer serves as the single point of contact for clients, distributing network traffic across multiple servers in the cluster and helping to prevent any one server from becoming a point of failure. - -We don't recommend installing Rancher in a single Docker container, because if the node goes down, there is no copy of the cluster data available on other nodes and you could lose the data on your Rancher server. - -Rancher needs to be installed on either a high-availability [RKE (Rancher Kubernetes Engine)]({{}}/rke/latest/en/) Kubernetes cluster, or a high-availability [K3s (Lightweight Kubernetes)]({{}}/k3s/latest/en/) Kubernetes cluster. Both RKE and K3s are fully certified Kubernetes distributions. - -### K3s Kubernetes Cluster Installations - -If you are installing Rancher v2.4 for the first time, we recommend installing it on a K3s Kubernetes cluster. One main advantage of this K3s architecture is that it allows an external datastore to hold the cluster data, allowing the K3s server nodes to be treated as ephemeral. - -The option to install Rancher on a K3s cluster is a feature introduced in Rancher v2.4. K3s is easy to install, with half the memory of Kubernetes, all in a binary less than 100 MB. - -
Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server
-![Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server]({{}}/img/rancher/k3s-server-storage.svg) - -### RKE Kubernetes Cluster Installations - -If you are installing Rancher prior to v2.4, you will need to install Rancher on an RKE cluster, in which the cluster data is stored on each node with the etcd role. As of Rancher v2.4, there is no migration path to transition the Rancher server from an RKE cluster to a K3s cluster. All versions of the Rancher server, including v2.4+, can be installed on an RKE cluster. - -In an RKE installation, the cluster data is replicated on each of three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails. - -
Architecture of an RKE Kubernetes Cluster Running the Rancher Management Server
-![Architecture of an RKE Kubernetes cluster running the Rancher management server]({{}}/img/rancher/rke-server-storage.svg) - -# Recommended Load Balancer Configuration for Kubernetes Installations - -We recommend the following configurations for the load balancer and Ingress controllers: - -* The DNS for Rancher should resolve to a Layer 4 load balancer (TCP) -* The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -* The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -* The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at Ingress controllers
-![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) - -# Environment for Kubernetes Installations - -It is strongly recommended to install Rancher on a Kubernetes cluster on hosted infrastructure such as Amazon's EC2 or Google Compute Engine. - -For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. - -It is not recommended to install Rancher on top of a managed Kubernetes service such as Amazon’s EKS or Google Kubernetes Engine. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. - -# Recommended Node Roles for Kubernetes Installations - -Our recommendations for the roles of each node differ depending on whether Rancher is installed on a K3s Kubernetes cluster or an RKE Kubernetes cluster. - -### K3s Cluster Roles - -In K3s clusters, there are two types of nodes: server nodes and agent nodes. Both servers and agents can have workloads scheduled on them. Server nodes run the Kubernetes master. - -For the cluster running the Rancher management server, we recommend using two server nodes. Agent nodes are not required. - -### RKE Cluster Roles - -If Rancher is installed on an RKE Kubernetes cluster, the cluster should have three nodes, and each node should have all three Kubernetes roles: etcd, controlplane, and worker. - -### Contrasting RKE Cluster Architecture for Rancher Server and for Downstream Kubernetes Clusters - -Our recommendation for RKE node roles on the Rancher server cluster contrasts with our recommendations for the downstream user clusters that run your apps and services. - -Rancher uses RKE as a library when provisioning downstream Kubernetes clusters. 
Note: The capability to provision downstream K3s clusters will be added in a future version of Rancher. - -For downstream Kubernetes clusters, we recommend that each node in a user cluster should have a single role for stability and scalability. - -![Kubernetes Roles for Nodes in Rancher Server Cluster vs. User Clusters]({{}}/img/rancher/rancher-architecture-node-roles.svg) - -RKE only requires at least one node with each role and does not require nodes to be restricted to one role. However, for the clusters that run your apps, we recommend separate roles for each node so that workloads on worker nodes don't interfere with the Kubernetes master or cluster data as your services scale. - -We recommend that downstream user clusters should have at least: - -- **Three nodes with only the etcd role** to maintain a quorum if one node is lost, making the state of your cluster highly available -- **Two nodes with only the controlplane role** to make the master component highly available -- **One or more nodes with only the worker role** to run the Kubernetes node components, as well as the workloads for your apps and services - -With that said, it is safe to use all three roles on three nodes when setting up the Rancher server because: - -* It allows one `etcd` node failure. -* It maintains multiple instances of the master components by having multiple `controlplane` nodes. -* No other workloads than Rancher itself should be created on this cluster. - -Because no additional workloads will be deployed on the Rancher server cluster, in most cases it is not necessary to use the same architecture that we recommend for the scalability and reliability of downstream clusters. 
- -For more best practices for downstream clusters, refer to the [production checklist]({{}}/rancher/v2.x/en/cluster-provisioning/production) or our [best practices guide.]({{}}/rancher/v2.x/en/best-practices/management/#tips-for-scaling-and-reliability) - -# Architecture for an Authorized Cluster Endpoint - -If you are using an [authorized cluster endpoint,]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) we recommend creating an FQDN pointing to a load balancer which balances traffic across your nodes with the `controlplane` role. - -If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate, which will be included in the generated kubeconfig file to validate the certificate chain. See the documentation on [kubeconfig files]({{}}/rancher/v2.x/en/k8s-in-rancher/kubeconfig/) and [API keys]({{}}/rancher/v2.x/en/user-settings/api-keys/#creating-an-api-key) for more information. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/best-practices/mcm/containers/_index.md b/content/rancher/v2.5/en/ecm/best-practices/mcm/containers/_index.md deleted file mode 100644 index 83f1cc182ec..00000000000 --- a/content/rancher/v2.5/en/ecm/best-practices/mcm/containers/_index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Tips for Setting Up Containers -weight: 100 ---- - -Running well built containers can greatly impact the overall performance and security of your environment. - -Below are a few tips for setting up your containers. - -For a more detailed discussion of security for containers, you can also refer to Rancher's [Guide to Container Security.](https://rancher.com/complete-guide-container-security) - -### Use a Common Container OS - -When possible, you should try to standardize on a common container base OS. - -Smaller distributions such as Alpine and BusyBox reduce container image size and generally have a smaller attack/vulnerability surface. 
- -Popular distributions such as Ubuntu, Fedora, and CentOS are more field-tested and offer more functionality. - -### Start with a FROM scratch container -If your microservice is a standalone static binary, you should use a FROM scratch container. - -The FROM scratch container is an [official Docker image](https://hub.docker.com/_/scratch) that is empty so that you can use it to design minimal images. - -This will have the smallest attack surface and smallest image size. - -### Run Container Processes as Unprivileged -When possible, use a non-privileged user when running processes within your container. While container runtimes provide isolation, vulnerabilities and attacks are still possible. Inadvertent or accidental host mounts can also be impacted if the container is running as root. For details on configuring a security context for a pod or container, refer to the [Kubernetes docs](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - -### Define Resource Limits -Apply CPU and memory limits to your pods. This can help manage the resources on your worker nodes and prevent a malfunctioning microservice from impacting other microservices. - -In standard Kubernetes, you can set resource limits on the namespace level. In Rancher, you can set resource limits on the project level and they will propagate to all the namespaces within the project. For details, refer to the Rancher docs. - -When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project or namespace, all containers will require a respective CPU or Memory field set during creation. To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. 
- -The Kubernetes docs have more information on how resource limits can be set at the [container level](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container) and the namespace level. - -### Define Resource Requirements -You should apply CPU and memory requirements to your pods. This is crucial for informing the scheduler which type of compute node your pod needs to be placed on, and ensuring it does not over-provision that node. In Kubernetes, you can set a resource requirement by defining `resources.requests` in the resource requests field in a pod's container spec. For details, refer to the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). - -> **Note:** If you set a resource limit for the namespace that the pod is deployed in, and the container doesn't have a specific resource request, the pod will not be allowed to start. To avoid setting these fields on each and every container during workload creation, a default container resource limit can be specified on the namespace. - -It is recommended to define resource requirements on the container level because otherwise, the scheduler makes assumptions that will likely not be helpful to your application when the cluster experiences load. - -### Liveness and Readiness Probes -Set up liveness and readiness probes for your container. Unless your container completely crashes, Kubernetes will not know it's unhealthy unless you create an endpoint or mechanism that can report container status. Alternatively, make sure your container halts and crashes if unhealthy. 
- -The Kubernetes docs show how to [configure liveness and readiness probes for containers.](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) diff --git a/content/rancher/v2.5/en/ecm/best-practices/mcm/deployment-strategies/_index.md b/content/rancher/v2.5/en/ecm/best-practices/mcm/deployment-strategies/_index.md deleted file mode 100644 index cd6d01bb1c4..00000000000 --- a/content/rancher/v2.5/en/ecm/best-practices/mcm/deployment-strategies/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Rancher Deployment Strategies -weight: 100 ---- - -There are two recommended deployment strategies. Each one has its own pros and cons. Read more about which one would fit best for your use case: - -* [Hub and Spoke](#hub-and-spoke) -* [Regional](#regional) - -# Hub & Spoke Strategy ---- - -In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run on a high-availability Kubernetes cluster, and there would be impact due to latencies. - -{{< img "/img/rancher/bpg/hub-and-spoke.png" "Hub and Spoke Deployment">}} - -### Pros - -* Environments could have nodes and network connectivity across regions. -* Single control plane interface to view/see all regions and environments. -* Kubernetes does not require Rancher to operate and can tolerate losing connectivity to the Rancher control plane. - -### Cons - -* Subject to network latencies. -* If the control plane goes out, global provisioning of new services is unavailable until it is restored. However, each Kubernetes cluster can continue to be managed individually. - -# Regional Strategy ---- -In the regional deployment model a control plane is deployed in close proximity to the compute nodes. - -{{< img "/img/rancher/bpg/regional.png" "Regional Deployment">}} - -### Pros - -* Rancher functionality in regions stay operational if a control plane in another region goes down. 
-* Network latency is greatly reduced, improving the performance of functionality in Rancher. -* Upgrades of the Rancher control plane can be done independently per region. - -### Cons - -* Overhead of managing multiple Rancher installations. -* Visibility across global Kubernetes clusters requires multiple interfaces/panes of glass. -* Deploying multi-cluster apps in Rancher requires repeating the process for each Rancher server. diff --git a/content/rancher/v2.5/en/ecm/best-practices/mcm/deployment-types/_index.md b/content/rancher/v2.5/en/ecm/best-practices/mcm/deployment-types/_index.md deleted file mode 100644 index ff493e7fbf2..00000000000 --- a/content/rancher/v2.5/en/ecm/best-practices/mcm/deployment-types/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Tips for Running Rancher -weight: 100 ---- - -A high-availability Kubernetes installation, defined as an installation of Rancher on a Kubernetes cluster with at least three nodes, should be used in any production installation of Rancher, as well as any installation deemed "important." Multiple Rancher instances running on multiple nodes ensure high availability that cannot be accomplished with a single node environment. - -When you set up your high-availability Rancher installation, consider the following: - -### Run Rancher on a Separate Cluster -Don't run other workloads or microservices in the Kubernetes cluster that Rancher is installed on. - -### Don't Run Rancher on a Hosted Kubernetes Environment -When the Rancher server is installed on a Kubernetes cluster, it should not be run in a hosted Kubernetes environment such as Google's GKE, Amazon's EKS, or Microsoft's AKS. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. - -It is strongly recommended to use hosted infrastructure such as Amazon's EC2 or Google's GCE instead. 
When you create a cluster using RKE on an infrastructure provider, you can configure the cluster to create etcd snapshots as a backup. You can then [use RKE]({{}}/rke/latest/en/etcd-snapshots/) or [Rancher]({{}}/rancher/v2.x/en/backups/restorations/) to restore your cluster from one of these snapshots. In a hosted Kubernetes environment, this backup and restore functionality is not supported. - -### Make sure nodes are configured correctly for Kubernetes ### -It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with ssd backed etcd. More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/performance.md) - -### When using RKE: Backup the Statefile -RKE keeps record of the cluster state in a file called `cluster.rkestate`. This file is important for the recovery of a cluster and/or the continued maintenance of the cluster through RKE. Because this file contains certificate material, we strongly recommend encrypting this file before backing up. After each run of `rke up` you should backup the state file. - -### Run All Nodes in the Cluster in the Same Datacenter -For best performance, run all three of your nodes in the same geographic datacenter. If you are running nodes in the cloud, such as AWS, run each node in a separate Availability Zone. For example, launch node 1 in us-west-2a, node 2 in us-west-2b, and node 3 in us-west-2c. 
- -### Development and Production Environments Should be Similar -It's strongly recommended to have a "staging" or "pre-production" environment of the Kubernetes cluster that Rancher runs on. This environment should mirror your production environment as closely as possible in terms of software and hardware configuration. - -### Monitor Your Clusters to Plan Capacity -The Rancher server's Kubernetes cluster should run within the [system and hardware requirements]({{}}/rancher/v2.x/en/installation/requirements/) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. - -However, metrics-driven capacity planning analysis should be the ultimate guidance for scaling Rancher, because the published requirements take into account a variety of workload types. - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution, and Grafana, which lets you visualize the metrics from Prometheus. - -After you [enable monitoring]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) in the cluster, you can set up [a notification channel]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) and [cluster alerts]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. 
- diff --git a/content/rancher/v2.5/en/ecm/best-practices/mcm/management/_index.md b/content/rancher/v2.5/en/ecm/best-practices/mcm/management/_index.md deleted file mode 100644 index 4fd202dc1ec..00000000000 --- a/content/rancher/v2.5/en/ecm/best-practices/mcm/management/_index.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -title: Tips for Scaling, Security and Reliability -weight: 101 ---- - -Rancher allows you to set up numerous combinations of configurations. Some configurations are more appropriate for development and testing, while there are other best practices for production environments for maximum availability and fault tolerance. The following best practices should be followed for production. - -# Tips for Preventing and Handling Problems - -These tips can help you solve problems before they happen. - -### Run Rancher on a Supported OS and Supported Docker Version -Rancher is container-based and can potentially run on any Linux-based operating system. However, only operating systems listed in the [requirements documentation]({{}}/rancher/v2.x/en/installation/requirements/) should be used for running Rancher, along with a supported version of Docker. These versions have been most thoroughly tested and can be properly supported by the Rancher Support team. - -### Upgrade Your Kubernetes Version -Keep your Kubernetes cluster up to date with a recent and supported version. Typically the Kubernetes community will support the current version and previous three minor releases (for example, 1.14.x, 1.13.x, 1.12.x, and 1.11.x). After a new version is released, the third-oldest supported version reaches EOL (End of Life) status. Running on an EOL release can be a risk if security issues are found and patches are not available. The community typically makes minor releases every quarter (every three months). 
- -Rancher’s SLAs are not community dependent, but as Kubernetes is community-driven software, the quality of experience will degrade as you get farther away from the community's supported target. - -### Kill Pods Randomly During Testing -Run chaoskube or a similar mechanism to randomly kill pods in your test environment. This will test the resiliency of your infrastructure and the ability of Kubernetes to self-heal. It's not recommended to run this in your production environment. - -### Deploy Complicated Clusters with Terraform -Rancher's "Add Cluster" UI is preferable for getting started with Kubernetes cluster orchestration or for simple use cases. However, for more complex or demanding use cases, it is recommended to use a CLI/API driven approach. [Terraform](https://www.terraform.io/) is recommended as the tooling to implement this. When you use Terraform with version control and a CI/CD environment, you can have high assurances of consistency and reliability when deploying Kubernetes clusters. This approach also gives you the most customization options. - -Rancher [maintains a Terraform provider](https://rancher.com/blog/2019/rancher-2-terraform-provider/) for working with Rancher 2.0 Kubernetes. It is called the [Rancher2 Provider.](https://www.terraform.io/docs/providers/rancher2/index.html) - -### Upgrade Rancher in a Staging Environment -All upgrades, both patch and feature upgrades, should be first tested on a staging environment before production is upgraded. The more closely the staging environment mirrors production, the higher chance your production upgrade will be successful. - -### Renew Certificates Before they Expire -Multiple people in your organization should set up calendar reminders for certificate renewal. Consider renewing the certificate two weeks to one month in advance. 
If you have multiple certificates to track, consider using [monitoring and alerting mechanisms]({{}}/rancher/v2.x/en/cluster-admin/tools/) to track certificate expiration. - -Rancher-provisioned Kubernetes clusters will use certificates that expire in one year. Clusters provisioned by other means may have a longer or shorter expiration. - -Certificates can be renewed for Rancher-provisioned clusters [through the Rancher user interface]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/). - -### Enable Recurring Snapshots for Backing up and Restoring the Cluster -Make sure etcd recurring snapshots are enabled. Extend the snapshot retention to a period of time that meets your business needs. In the event of a catastrophic failure or deletion of data, this may be your only recourse for recovery. For details about configuring snapshots, refer to the [RKE documentation]({{}}/rke/latest/en/etcd-snapshots/) or the [Rancher documentation on backups]({{}}/rancher/v2.x/en/backups/). - -### Provision Clusters with Rancher -When possible, use Rancher to provision your Kubernetes cluster rather than importing a cluster. This will ensure the best compatibility and supportability. - -### Use Stable and Supported Rancher Versions for Production -Do not upgrade production environments to alpha, beta, release candidate (rc), or "latest" versions. These early releases are often not stable and may not have a future upgrade path. - -When installing or upgrading a non-production environment to an early release, anticipate problems such as features not working, data loss, outages, and inability to upgrade without a reinstall. - -Make sure the feature version you are upgrading to is considered "stable" as determined by Rancher. Use the beta, release candidate, and "latest" versions in a testing, development, or demo environment to try out new features. Feature version upgrades, for example 2.1.x to 2.2.x, should be considered as and when they are released. 
Some bug fixes and most features are not back ported into older versions. - -Keep in mind that Rancher does End of Life support for old versions, so you will eventually want to upgrade if you want to continue to receive patches. - -For more detail on what happens during the Rancher product lifecycle, refer to the [Support Maintenance Terms](https://rancher.com/support-maintenance-terms/). - -# Network Topology -These tips can help Rancher work more smoothly with your network. - -### Use Low-latency Networks for Communication Within Clusters -Kubernetes clusters are best served by low-latency networks. This is especially true for the control plane components and etcd, where lots of coordination and leader election traffic occurs. Networking between Rancher server and the Kubernetes clusters it manages are more tolerant of latency. - -### Allow Rancher to Communicate Directly with Clusters -Limit the use of proxies or load balancers between Rancher server and Kubernetes clusters. As Rancher is maintaining a long-lived web sockets connection, these intermediaries can interfere with the connection lifecycle as they often weren't configured with this use case in mind. - - -# Tips for Scaling and Reliability -These tips can help you scale your cluster more easily. - -### Use One Kubernetes Role Per Host -Separate the etcd, control plane, and worker roles onto different hosts. Don't assign multiple roles to the same host, such as a worker and control plane. This will give you maximum scalability. - -### Run the Control Plane and etcd on Virtual Machines -Run your etcd and control plane nodes on virtual machines where you can scale vCPU and memory easily if needed in the future. - -### Use at Least Three etcd Nodes -Provision 3 or 5 etcd nodes. Etcd requires a quorum to determine a leader by the majority of nodes, therefore it is not recommended to have clusters of even numbers. 
Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. - -### Use at Least Two Control Plane Nodes -Provision two or more control plane nodes. Some control plane components, such as the `kube-apiserver`, run in [active-active](https://www.jscape.com/blog/active-active-vs-active-passive-high-availability-cluster) mode and will give you more scalability. Other components such as kube-scheduler and kube-controller run in active-passive mode (leader elect) and give you more fault tolerance. - -### Monitor Your Cluster -Closely monitor and scale your nodes as needed. You should [enable cluster monitoring]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and use the Prometheus metrics and Grafana visualization options as a starting point. - - -# Tips for Security -Below are some basic tips for increasing security in Rancher. For more detailed information about securing your cluster, you can refer to these resources: - -- Rancher's [security documentation and Kubernetes cluster hardening guide]({{}}/rancher/v2.x/en/security/) -- [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) - -### Update Rancher with Security Patches -Keep your Rancher installation up to date with the latest patches. Patch updates have important software fixes and sometimes have security fixes. When patches with security fixes are released, customers with Rancher licenses are notified by e-mail. These updates are also posted on Rancher's [forum](https://forums.rancher.com/). - -### Report Security Issues Directly to Rancher -If you believe you have uncovered a security-related problem in Rancher, please communicate this immediately and discreetly to the Rancher team (security@rancher.com). Posting security issues on public forums such as Twitter, Rancher Slack, GitHub, etc. can potentially compromise security for all Rancher customers. 
Reporting security issues discreetly allows Rancher to assess and mitigate the problem. Security patches are typically given high priority and released as quickly as possible. - -### Only Upgrade One Component at a Time -In addition to Rancher software updates, closely monitor security fixes for related software, such as Docker, Linux, and any libraries used by your workloads. For production environments, try to avoid upgrading too many entities during a single maintenance window. Upgrading multiple components can make it difficult to root cause an issue in the event of a failure. As business requirements allow, upgrade one component at a time. - -# Tips for Multi-Tenant Clusters - -### Namespaces -Each tenant should have their own unique namespaces within the cluster. This avoids naming conflicts and allows resources to be only visible to their owner through use of RBAC policy. - -### Project Isolation -Use Rancher's Project Isolation to automatically generate Network Policy between Projects (sets of Namespaces). This further protects workloads from interference. - -### Resource Limits -Enforce use of sane resource limit definitions for every deployment in your cluster. This not only protects the owners of the deployment, but the neighboring resources from other tenants as well. Remember, namespaces do not isolate at the node level, so over-consumption of resources on a node affects other namespace deployments. Admission controllers can be written to require resource limit definitions. - -### Resource Requirements -Enforce use of resource requirement definitions for each deployment in your cluster. This enables the scheduler to appropriately schedule workloads. Otherwise you will eventually end up with over-provisioned nodes. - -# Class of Service and Kubernetes Clusters -A class of service describes the expectations around cluster uptime, durability, and duration of maintenance windows. 
Typically organizations group these characteristics into labels such as "dev" or "prod". - -### Consider fault domains -Kubernetes clusters can span multiple classes of service; however, it is important to consider the ability for one workload to affect another. Without proper deployment practices such as resource limits, requirements, etc., a deployment that is not behaving well has the potential to impact the health of the cluster. In a "dev" environment it is common for end-users to exercise less caution with deployments, thus increasing the chance of such behavior. Sharing this behavior with your production workload increases risk. - -### Upgrade risks -Upgrades of Kubernetes are not without risk; the best way to predict the outcome of an upgrade is to try it on a cluster of similar load and use case as your production cluster. This is where having non-prod class of service clusters can be advantageous. - -### Resource Efficiency -Clusters can be built with varying degrees of redundancy. In a class of service with low expectations for uptime, resources and cost can be conserved by building clusters without redundant Kubernetes control components. This approach may also free up more budget/resources to increase the redundancy at the production level. - -# Network Security -In general, you can use network security best practices in your Rancher and Kubernetes clusters. Consider the following: - -### Use a Firewall Between your Hosts and the Internet -Firewalls should be used between your hosts and the Internet (or corporate Intranet). This could be enterprise firewall appliances in a datacenter or SDN constructs in the cloud, such as VPCs, security groups, ingress, and egress rules. Try to limit inbound access only to ports and IP addresses that require it. Outbound access can be shut off (air gap) if the environment contains sensitive information that requires this restriction. If available, use firewalls with intrusion detection and DDoS prevention. 
- -### Run Periodic Security Scans -Run security and penetration scans on your environment periodically. Even with well design infrastructure, a poorly designed microservice could compromise the entire environment. diff --git a/content/rancher/v2.5/en/ecm/best-practices/rancher-server/_index.md b/content/rancher/v2.5/en/ecm/best-practices/rancher-server/_index.md deleted file mode 100644 index 1bd764c11f0..00000000000 --- a/content/rancher/v2.5/en/ecm/best-practices/rancher-server/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Best Practices for the Rancher Server -weight: 1 ---- - -> This page is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/cloning-clusters/_index.md b/content/rancher/v2.5/en/ecm/cloning-clusters/_index.md deleted file mode 100644 index c5315fa3f37..00000000000 --- a/content/rancher/v2.5/en/ecm/cloning-clusters/_index.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: Cloning Clusters -weight: 18 ---- - -If you have a cluster in Rancher that you want to use as a template for creating similar clusters, you can use Rancher CLI to clone the cluster's configuration, edit it, and then use it to quickly launch the cloned cluster. - -Duplication of imported clusters is not supported. - -| Cluster Type | Cloneable? | -|----------------------------------|---------------| -| [Nodes Hosted by Infrastructure Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) | ✓ | -| [Hosted Kubernetes Providers]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) | ✓ | -| [Custom Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes) | ✓ | -| [Imported Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) | | - -> **Warning:** During the process of duplicating a cluster, you will edit a config file full of cluster settings. 
However, we recommend editing only values explicitly listed in this document, as cluster duplication is designed for simple cluster copying, _not_ wide scale configuration changes. Editing other values may invalidate the config file, which will lead to cluster deployment failure. - -## Prerequisites - -Download and install [Rancher CLI]({{}}/rancher/v2.x/en/cli). Remember to [create an API bearer token]({{}}/rancher/v2.x/en/user-settings/api-keys) if necessary. - - -## 1. Export Cluster Config - -Begin by using Rancher CLI to export the configuration for the cluster that you want to clone. - -1. Open Terminal and change your directory to the location of the Rancher CLI binary, `rancher`. - -1. Enter the following command to list the clusters managed by Rancher. - - - ./rancher cluster ls - - -1. Find the cluster that you want to clone, and copy either its resource `ID` or `NAME` to your clipboard. From this point on, we'll refer to the resource `ID` or `NAME` as ``, which is used as a placeholder in the next step. - -1. Enter the following command to export the configuration for your cluster. - - - ./rancher clusters export - - - **Step Result:** The YAML for a cloned cluster prints to Terminal. - -1. Copy the YAML to your clipboard and paste it in a new file. Save the file as `cluster-template.yml` (or any other name, as long as it has a `.yml` extension). - -## 2. Modify Cluster Config - -Use your favorite text editor to modify the cluster configuration in `cluster-template.yml` for your cloned cluster. - -> **Note:** As of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -1. Open `cluster-template.yml` (or whatever you named your config) in your favorite text editor. 
- - >**Warning:** Only edit the cluster config values explicitly called out below. Many of the values listed in this file are used to provision your cloned cluster, and editing their values may break the provisioning process. - - -1. As depicted in the example below, at the `` placeholder, replace your original cluster's name with a unique name (``). If your cloned cluster has a duplicate name, the cluster will not provision successfully. - - ```yml - Version: v3 - clusters: - : # ENTER UNIQUE NAME - dockerRootDir: /var/lib/docker - enableNetworkPolicy: false - rancherKubernetesEngineConfig: - addonJobTimeout: 30 - authentication: - strategy: x509 - authorization: {} - bastionHost: {} - cloudProvider: {} - ignoreDockerVersion: true - ``` - -1. For each `nodePools` section, replace the original nodepool name with a unique name at the `` placeholder. If your cloned cluster has a duplicate nodepool name, the cluster will not provision successfully. - - ```yml - nodePools: - : - clusterId: do - controlPlane: true - etcd: true - hostnamePrefix: mark-do - nodeTemplateId: do - quantity: 1 - worker: true - ``` - -1. When you're done, save and close the configuration. - -## 3. Launch Cloned Cluster - -Move `cluster-template.yml` into the same directory as the Rancher CLI binary. Then run this command: - - ./rancher up --file cluster-template.yml - -**Result:** Your cloned cluster begins provisioning. Enter `./rancher cluster ls` to confirm. You can also log into the Rancher UI and open the **Global** view to watch your provisioning cluster's progress. 
diff --git a/content/rancher/v2.5/en/ecm/cluster-configuration/_index.md b/content/rancher/v2.5/en/ecm/cluster-configuration/_index.md deleted file mode 100644 index 526e9de80b7..00000000000 --- a/content/rancher/v2.5/en/ecm/cluster-configuration/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: RKE Cluster Configuration -weight: 6 ---- - -After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. To edit your cluster, open the **Global** view, make sure the **Clusters** tab is selected, and then select **⋮ > Edit** for the cluster that you want to edit. - -To Edit an Existing Cluster -![Edit Cluster]({{}}/img/rancher/edit-cluster.png) - -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table" %}} - -## Editing Cluster Membership - -Cluster administrators can [edit the membership for a cluster,]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members) controlling which Rancher users can access the cluster and what features they can use. - -## Cluster Options - -When editing clusters, clusters that are [launched using RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) feature more options than clusters that are imported or hosted by a Kubernetes provider. The headings that follow document options available only for RKE clusters. - -### Updating ingress-nginx - -Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. 
- -If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. - -# Editing Other Cluster Options - -In [clusters launched by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. - ->**Note:** These options are not available for imported clusters or hosted Kubernetes clusters. - -Options for RKE Clusters -![Cluster Options]({{}}/img/rancher/cluster-options.png) - - -Option | Description | ----------|----------| - Kubernetes Version | The version of Kubernetes installed on each cluster node. For more detail, see [Upgrading Kubernetes]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes). | - Network Provider | The [container networking interface]({{}}/rancher/v2.x/en/faq/networking/#cni-providers) that powers networking for your cluster.

**Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. | - Project Network Isolation | As of Rancher v2.0.7, if you're using the Canal network provider, you can choose whether to enable or disable inter-project communication. | - Nginx Ingress | If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use Nginx ingress within the cluster. | - Metrics Server Monitoring | Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. | - Pod Security Policy Support | Enables [pod security policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. | - Docker version on nodes | Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a [supported Docker version]({{}}/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/), Rancher will stop pods from running on nodes that don't have a supported Docker version installed. | - Docker Root Directory | The directory on your cluster nodes where you've installed Docker. If you install Docker on your nodes to a non-default directory, update this path. | - Default Pod Security Policy | If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. | - Cloud Provider | If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. 
If you want to store persistent data for your cloud-hosted cluster, this option is required. | -
- -# Editing Cluster as YAML - ->**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. - -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - -- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. -- To read from an existing RKE file, click **Read from File**. - -In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. For more information, see the [cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) - -![image]({{}}/img/rancher/cluster-options-yaml.png) - -For an example of RKE config file syntax, see the [RKE documentation]({{}}/rke/latest/en/example-yamls/). diff --git a/content/rancher/v2.5/en/ecm/config-private-registry/_index.md b/content/rancher/v2.5/en/ecm/config-private-registry/_index.md deleted file mode 100644 index 1ba3e37b8f8..00000000000 --- a/content/rancher/v2.5/en/ecm/config-private-registry/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Configuring a Global Default Private Registry -weight: 10 ---- - -You might want to use a private Docker registry to share your custom base images within your organization. With a private registry, you can keep a private, consistent, and centralized source of truth for the Docker images that are used in your clusters. 
- -There are two main ways to set up private registries in Rancher: by setting up the global default registry through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. - -This section is about configuring the global default private registry, and focuses on how to configure the registry from the Rancher UI after Rancher is installed. - -For instructions on setting up a private registry with command line options during the installation of Rancher, refer to the [air gapped Docker installation]({{}}/rancher/v2.x/en/installation/air-gap-single-node) or [air gapped Kubernetes installation]({{}}/rancher/v2.x/en/installation/air-gap-high-availability) instructions. - -If your private registry requires credentials, it cannot be used as the default registry. There is no global way to set up a private registry with authorization for every Rancher-provisioned cluster. Therefore, if you want a Rancher-provisioned cluster to pull images from a private registry with credentials, you will have to [pass in the registry credentials through the advanced cluster options](#provisioning-clusters-with-private-registries-that-require-credentials) every time you create a new cluster. - -# Setting a Private Registry with No Credentials as the Default Registry - -1. Log into Rancher and configure the default administrator password. - -1. Go into the **Settings** view. - - {{< img "/img/rancher/airgap/settings.png" "Settings" >}} - -1. Look for the setting called `system-default-registry` and choose **Edit**. - - {{< img "/img/rancher/airgap/edit-system-default-registry.png" "Edit" >}} - -1. Change the value to your registry (e.g. `registry.yourdomain.com:port`). 
Do not prefix the registry with `http://` or `https://`. - - {{< img "/img/rancher/airgap/enter-system-default-registry.png" "Save" >}} - -**Result:** Rancher will use your private registry to pull system images. - -# Setting a Private Registry with Credentials when Deploying a Cluster - -You can follow these steps to configure a private registry when you provision a cluster with Rancher: - -1. When you create a cluster through the Rancher UI, go to the **Cluster Options** section and click **Show Advanced Options.** -1. In the Enable Private Registries section, click **Enabled.** -1. Enter the registry URL and credentials. -1. Click **Save.** - -**Result:** The new cluster will be able to pull images from the private registry. diff --git a/content/rancher/v2.5/en/ecm/disconnecting-nodes/_index.md b/content/rancher/v2.5/en/ecm/disconnecting-nodes/_index.md deleted file mode 100644 index 893e537ffb1..00000000000 --- a/content/rancher/v2.5/en/ecm/disconnecting-nodes/_index.md +++ /dev/null @@ -1,279 +0,0 @@ ---- -title: Disconnecting Nodes from RKE Kubernetes Clusters -description: Learn about cluster cleanup when removing nodes from your Rancher-launched Kubernetes cluster. What is removed, how to do it manually -weight: 11 ---- - -This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. - -When you use Rancher to [launch nodes for a cluster]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher), resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. - -When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources are automatically cleaned, and the only action needed is to restart the node. 
When a node has become unreachable and the automatic cleanup process cannot be used, we describe the steps that need to be executed before the node can be added to a cluster again. - -## What Gets Removed? - -When cleaning nodes provisioned using Rancher, the following components are deleted based on the type of cluster node you're removing. - -| Removed Component | [Nodes Hosted by Infrastructure Provider][1] | [Custom Nodes][2] | [Hosted Cluster][3] | [Imported Nodes][4] | -| ------------------------------------------------------------------------------ | --------------- | ----------------- | ------------------- | ------------------- | -| The Rancher deployment namespace (`cattle-system` by default) | ✓ | ✓ | ✓ | ✓ | -| `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` labeled by Rancher | ✓ | ✓ | ✓ | ✓ | -| Labels, Annotations, and Finalizers | ✓ | ✓ | ✓ | ✓ | -| Rancher Deployment | ✓ | ✓ | ✓ | | -| Machines, clusters, projects, and user custom resource definitions (CRDs) | ✓ | ✓ | ✓ | | -| All resources create under the `management.cattle.io` API Group | ✓ | ✓ | ✓ | | -| All CRDs created by Rancher v2.x | ✓ | ✓ | ✓ | | - -[1]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/ - -## Removing a Node from a Cluster by Rancher UI - -When the node is in `Active` state, removing the node from a cluster will trigger a process to clean up the node. Please restart the node after the automatic cleanup process is done to make sure any non-persistent data is properly removed. 
- -**To restart a node:** - -``` -# using reboot -$ sudo reboot - -# using shutdown -$ sudo shutdown -r now -``` - -## Removing Rancher Components from a Cluster Manually - -When a node is unreachable and removed from the cluster, the automatic cleaning process can't be triggered because the node is unreachable. Please follow the steps below to manually remove the Rancher components. - ->**Warning:** The commands listed below will remove data from the node. Make sure you have created a backup of files you want to keep before executing any of the commands as data will be lost. - -### Removing Rancher Components from Imported Clusters - -For imported clusters, the process for removing Rancher is a little different. You have the option of simply deleting the cluster in the Rancher UI, or you can run a script that removes Rancher components from the nodes. Both options make the same deletions. - -After the imported cluster is detached from Rancher, the cluster's workloads will be unaffected and you can access the cluster using the same methods that you did before the cluster was imported into Rancher. - -{{% tabs %}} -{{% tab "By UI / API" %}} ->**Warning:** This process will remove data from your cluster. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. - -After you initiate the removal of an [imported cluster]({{}}/rancher/v2.x/en/cluster-provisioning/#import-existing-cluster) using the Rancher UI (or API), the following events occur. - -1. Rancher creates a `serviceAccount` that it uses to remove the Rancher components from the cluster. This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the Rancher components. - -1. 
Using the `serviceAccount`, Rancher schedules and runs a [job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that cleans the Rancher components off of the cluster. This job also references the `serviceAccount` and its roles as dependencies, so the job deletes them before its completion. - -1. Rancher is removed from the cluster. However, the cluster persists, running the native version of Kubernetes. - -**Result:** All components listed for imported clusters in [What Gets Removed?](#what-gets-removed) are deleted. - -{{% /tab %}} -{{% tab "By Script" %}} -Rather than cleaning imported cluster nodes using the Rancher UI, you can run a script instead. This functionality is available since `v2.1.0`. - ->**Prerequisite:** -> ->Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -1. Open a web browser, navigate to [GitHub](https://github.com/rancher/rancher/blob/master/cleanup/user-cluster.sh), and download `user-cluster.sh`. - -1. Make the script executable by running the following command from the same directory as `user-cluster.sh`: - - ``` - chmod +x user-cluster.sh - ``` - -1. **Air Gap Environments Only:** Open `user-cluster.sh` and replace `yaml_url` with the URL in `user-cluster.yml`. - - If you don't have an air gap environment, skip this step. - -1. From the same directory, run the script and provide the `rancher/rancher-agent` image version which should be equal to the version of Rancher used to manage the cluster. (``): - - >**Tip:** - > - >Add the `-dry-run` flag to preview the script's outcome without making changes. - ``` - ./user-cluster.sh rancher/rancher-agent: - ``` - -**Result:** The script runs. All components listed for imported clusters in [What Gets Removed?](#what-gets-removed) are deleted. - -{{% /tab %}} -{{% /tabs %}} - -### Windows Nodes - -To clean up a Windows node, you can run a cleanup script located in `c:\etc\rancher`. 
The script deletes Kubernetes generated resources and the execution binary. It also drops the firewall rules and network settings. - -To run the script, you can use this command in the PowerShell: - -``` -pushd c:\etc\rancher -.\cleanup.ps1 -popd -``` - -**Result:** The node is reset and can be re-added to a Kubernetes cluster. - -### Docker Containers, Images, and Volumes - -Based on what role you assigned to the node, there are Kubernetes components in containers, containers belonging to overlay networking, DNS, ingress controller and Rancher agent. (and pods you created that have been scheduled to this node) - -**To clean all Docker containers, images and volumes:** - -``` -docker rm -f $(docker ps -qa) -docker rmi -f $(docker images -q) -docker volume rm $(docker volume ls -q) -``` - -### Mounts - -Kubernetes components and secrets leave behind mounts on the system that need to be unmounted. - -Mounts | ---------| -`/var/lib/kubelet/pods/XXX` (miscellaneous mounts) | -`/var/lib/kubelet` | -`/var/lib/rancher` | - -**To unmount all mounts:** - -``` -for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done -``` - -### Directories and Files - -The following directories are used when adding a node to a cluster, and should be removed. You can remove a directory using `rm -rf /directory_name`. - ->**Note:** Depending on the role you assigned to the node, some of the directories will or won't be present on the node. 
- -Directories | ---------| -`/etc/ceph` | -`/etc/cni` | -`/etc/kubernetes` | -`/opt/cni` | -`/opt/rke` | -`/run/secrets/kubernetes.io` | -`/run/calico` | -`/run/flannel` | -`/var/lib/calico` | -`/var/lib/etcd` | -`/var/lib/cni` | -`/var/lib/kubelet` | -`/var/lib/rancher/rke/log` | -`/var/log/containers` | -`/var/log/kube-audit` | -`/var/log/pods` | -`/var/run/calico` | - -**To clean the directories:** - -``` -rm -rf /etc/ceph \ - /etc/cni \ - /etc/kubernetes \ - /opt/cni \ - /opt/rke \ - /run/secrets/kubernetes.io \ - /run/calico \ - /run/flannel \ - /var/lib/calico \ - /var/lib/etcd \ - /var/lib/cni \ - /var/lib/kubelet \ - /var/lib/rancher/rke/log \ - /var/log/containers \ - /var/log/kube-audit \ - /var/log/pods \ - /var/run/calico -``` - -### Network Interfaces and Iptables - -The remaining two components that are changed/configured are (virtual) network interfaces and iptables rules. Both are non-persistent to the node, meaning that they will be cleared after a restart of the node. To remove these components, a restart is recommended. - -**To restart a node:** - -``` -# using reboot -$ sudo reboot - -# using shutdown -$ sudo shutdown -r now -``` - -If you want to know more on (virtual) network interfaces or iptables rules, please see the specific subjects below. - -### Network Interfaces - ->**Note:** Depending on the network provider configured for the cluster the node was part of, some of the interfaces will or won't be present on the node. - -Interfaces | ---------| -`flannel.1` | -`cni0` | -`tunl0` | -`caliXXXXXXXXXXX` (random interface names) | -`vethXXXXXXXX` (random interface names) | - -**To list all interfaces:** - -``` -# Using ip -ip address show - -# Using ifconfig -ifconfig -a -``` - -**To remove an interface:** - -``` -ip link delete interface_name -``` - -### Iptables - ->**Note:** Depending on the network provider configured for the cluster the node was part of, some of the chains will or won't be present on the node. 
- -Iptables rules are used to route traffic from and to containers. The created rules are not persistent, so restarting the node will restore iptables to its original state. - -Chains | ---------| -`cali-failsafe-in` | -`cali-failsafe-out` | -`cali-fip-dnat` | -`cali-fip-snat` | -`cali-from-hep-forward` | -`cali-from-host-endpoint` | -`cali-from-wl-dispatch` | -`cali-fw-caliXXXXXXXXXXX` (random chain names) | -`cali-nat-outgoing` | -`cali-pri-kns.NAMESPACE` (chain per namespace) | -`cali-pro-kns.NAMESPACE` (chain per namespace) | -`cali-to-hep-forward` | -`cali-to-host-endpoint` | -`cali-to-wl-dispatch` | -`cali-tw-caliXXXXXXXXXXX` (random chain names) | -`cali-wl-to-host` | -`KUBE-EXTERNAL-SERVICES` | -`KUBE-FIREWALL` | -`KUBE-MARK-DROP` | -`KUBE-MARK-MASQ` | -`KUBE-NODEPORTS` | -`KUBE-SEP-XXXXXXXXXXXXXXXX` (random chain names) | -`KUBE-SERVICES` | -`KUBE-SVC-XXXXXXXXXXXXXXXX` (random chain names) | - -**To list all iptables rules:** - -``` -iptables -L -t nat -iptables -L -t mangle -iptables -L -``` diff --git a/content/rancher/v2.5/en/ecm/drivers/_index.md b/content/rancher/v2.5/en/ecm/drivers/_index.md deleted file mode 100644 index 1ca1981a571..00000000000 --- a/content/rancher/v2.5/en/ecm/drivers/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Provisioning Drivers -weight: 12 ---- - -Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. - -### Rancher Drivers - -With Rancher drivers, you can enable/disable existing built-in drivers that are packaged in Rancher. Alternatively, you can add your own driver if Rancher has not yet implemented it. 
- -There are two types of drivers within Rancher: - -* [Cluster Drivers](#cluster-drivers) -* [Node Drivers](#node-drivers) - -## Cluster Drivers - -Cluster drivers are used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/), such as GKE, EKS, AKS, etc.. The availability of which cluster driver to display when creating a cluster is defined based on the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters for hosted Kubernetes clusters. By default, Rancher is packaged with several existing cluster drivers, but you can also create custom cluster drivers to add to Rancher. - -By default, Rancher has activated several hosted Kubernetes cloud providers including: - -* [Amazon EKS]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/) -* [Google GKE]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke/) -* [Azure AKS]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks/) - -There are several other hosted Kubernetes cloud providers that are disabled by default, but are packaged in Rancher: - -* [Alibaba ACK]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/) -* [Huawei CCE]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/) -* [Tencent]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/) - -## Node Drivers - -Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. 
By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. - -If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. - -Rancher supports several major cloud providers, but by default, these node drivers are active and available for deployment: - -* [Amazon EC2]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) -* [Azure]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/) -* [Digital Ocean]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/) -* [vSphere]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/) diff --git a/content/rancher/v2.5/en/ecm/drivers/cluster-drivers/_index.md b/content/rancher/v2.5/en/ecm/drivers/cluster-drivers/_index.md deleted file mode 100644 index 03a67524ef3..00000000000 --- a/content/rancher/v2.5/en/ecm/drivers/cluster-drivers/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Cluster Drivers -weight: 1 ---- - -Cluster drivers are used to create clusters in a [hosted Kubernetes provider]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/), such as Google GKE. The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. - -If there are specific cluster drivers that you do not want to show your users, you may deactivate those cluster drivers within Rancher and they will not appear as an option for cluster creation. 
- -### Managing Cluster Drivers - ->**Prerequisites:** To create, edit, or delete cluster drivers, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Cluster Drivers]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. - -## Activating/Deactivating Cluster Drivers - -By default, Rancher only activates drivers for the most popular cloud providers, Google GKE, Amazon EKS and Azure AKS. If you want to show or hide any node driver, you can change its status. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. - -2. From the **Drivers** page, select the **Cluster Drivers** tab. - -3. Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. - -## Adding Custom Cluster Drivers - -If you want to use a cluster driver that Rancher doesn't support out-of-the-box, you can add the provider's driver in order to start using them to create _hosted_ kubernetes clusters. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. - -2. From the **Drivers** page select the **Cluster Drivers** tab. - -3. Click **Add Cluster Driver**. - -4. Complete the **Add Cluster Driver** form. Then click **Create**. - - -### Developing your own Cluster Driver - -In order to develop cluster driver to add to Rancher, please refer to our [example](https://github.com/rancher-plugins/kontainer-engine-driver-example). 
diff --git a/content/rancher/v2.5/en/ecm/drivers/node-drivers/_index.md b/content/rancher/v2.5/en/ecm/drivers/node-drivers/_index.md deleted file mode 100644 index 69975a929ff..00000000000 --- a/content/rancher/v2.5/en/ecm/drivers/node-drivers/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Node Drivers -weight: 2 ---- - -Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. - -If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. - -#### Managing Node Drivers - ->**Prerequisites:** To create, edit, or delete drivers, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Node Drivers]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. - -## Activating/Deactivating Node Drivers - -By default, Rancher only activates drivers for the most popular cloud providers, Amazon EC2, Azure, DigitalOcean and vSphere. If you want to show or hide any node driver, you can change its status. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. In versions prior to v2.2.0, you can select **Node Drivers** directly in the navigation bar. - -2. 
Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. - -## Adding Custom Node Drivers - -If you want to use a node driver that Rancher doesn't support out-of-the-box, you can add that provider's driver in order to start using them to create node templates and eventually node pools for your Kubernetes cluster. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. In version prior to v2.2.0, you can select **Node Drivers** directly in the navigation bar. - -2. Click **Add Node Driver**. - -3. Complete the **Add Node Driver** form. Then click **Create**. - -### Developing your own node driver - -Node drivers are implemented with [Docker Machine](https://docs.docker.com/machine/). diff --git a/content/rancher/v2.5/en/ecm/feature-flags/_index.md b/content/rancher/v2.5/en/ecm/feature-flags/_index.md deleted file mode 100644 index 4d766396545..00000000000 --- a/content/rancher/v2.5/en/ecm/feature-flags/_index.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -title: Enabling Experimental Features -weight: 17 ---- - -> This page is under construction. - -Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type]({{}}/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. - -The features can be enabled in three ways: - -- [Enable features when starting Rancher.](#enabling-features-when-starting-rancher) When installing Rancher with a CLI, you can use a feature flag to enable a feature by default. -- [Enable features from the Rancher UI](#enabling-features-with-the-rancher-ui) in Rancher v2.3.3+ by going to the **Settings** page. 
-- [Enable features with the Rancher API](#enabling-features-with-the-rancher-api) after installing Rancher. - -Each feature has two values: - -- A default value, which can be configured with a flag or environment variable from the command line -- A set value, which can be configured with the Rancher API or UI - -If no value has been set, Rancher uses the default value. - -Because the API sets the actual value and the command line sets the default value, that means that if you enable or disable a feature with the API or UI, it will override any value set with the command line. - -For example, if you install Rancher, then set a feature flag to true with the Rancher API, then upgrade Rancher with a command that sets the feature flag to false, the default value will still be false, but the feature will still be enabled because it was set with the Rancher API. If you then deleted the set value (true) with the Rancher API, setting it to NULL, the default value (false) would take effect. - -> **Note:** As of v2.4.0, there are some feature flags that may require a restart of the Rancher server container. These features that require a restart are marked in the table of these docs and in the UI. - -The following is a list of the feature flags available in Rancher: - -- `dashboard`: This feature enables the new experimental UI that has a new look and feel. The dashboard also leverages a new API in Rancher which allows the UI to access the default Kubernetes resources without any intervention from Rancher. -- `istio-virtual-service-ui`: This feature enables a [UI to create, read, update, and delete Istio virtual services and destination rules]({{}}/rancher/v2.x/en/installation/options/feature-flags/istio-virtual-service-ui), which are traffic management features of Istio. -- `proxy`: This feature enables Rancher to use a new simplified code base for the proxy, which can help enhance performance and security. 
The proxy feature is known to have issues with Helm deployments, which prevents any catalog applications to be deployed which includes Rancher's tools like monitoring, logging, Istio, etc. -- `unsupported-storage-drivers`: This feature [allows unsupported storage drivers.]({{}}/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers) In other words, it enables types for storage providers and provisioners that are not enabled by default. - -The below table shows the availability and default value for feature flags in Rancher: - -| Feature Flag Name | Default Value | Status | Available as of | Rancher Restart Required? | -| ----------------------------- | ------------- | ------------ | --------------- |---| -| `dashboard` | `true` | Experimental | v2.4.0 | x | -| `istio-virtual-service-ui` | `false` | Experimental | v2.3.0 | | -| `istio-virtual-service-ui` | `true` | GA | v2.3.2 | | -| `proxy` | `false` | Experimental | v2.4.0 | | -| `unsupported-storage-drivers` | `false` | Experimental | v2.3.0 | | - -# Enabling Features when Starting Rancher - -When you install Rancher, enable the feature you want with a feature flag. The command is different depending on whether you are installing Rancher on a single node or if you are doing a Kubernetes Installation of Rancher. - -> **Note:** Values set from the Rancher API will override the value passed in through the command line. - -{{% tabs %}} -{{% tab "Kubernetes Install" %}} -When installing Rancher with a Helm chart, use the `--features` option. 
In the below example, two features are enabled by passing the feature flag names in a comma separated list: - -``` -helm install rancher-latest/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 - --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 -``` - -Note: If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -### Rendering the Helm Chart for Air Gap Installations - -For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher) - -Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. - -The Helm 3 command is as follows: - -``` -helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts - --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 - --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 -``` - -The Helm 2 command is as follows: - -``` -helm template ./rancher-.tgz --output-dir . 
\ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts - --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 - --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 -``` - -{{% /tab %}} -{{% tab "Docker Install" %}} -When installing Rancher with Docker, use the `--features` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: - -``` -docker run -d -p 80:80 -p 443:443 \ - --restart=unless-stopped \ - rancher/rancher:rancher-latest \ - --features==true,=true # Available as of v2.3.0 -``` - -{{% /tab %}} -{{% /tabs %}} - -# Enabling Features with the Rancher UI - -1. Go to the **Global** view and click **Settings.** -1. Click the **Feature Flags** tab. You will see a list of experimental features. -1. To enable a feature, go to the disabled feature you want to enable and click **⋮ > Activate.** - -**Result:** The feature is enabled. - -### Disabling Features with the Rancher UI - -1. Go to the **Global** view and click **Settings.** -1. Click the **Feature Flags** tab. You will see a list of experimental features. -1. To disable a feature, go to the enabled feature you want to disable and click **⋮ > Deactivate.** - -**Result:** The feature is disabled. - -# Enabling Features with the Rancher API - -1. Go to `/v3/features`. -1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. -1. In the upper left corner of the screen, under **Operations,** click **Edit.** -1. In the **Value** drop-down menu, click **True.** -1. 
Click **Show Request.** -1. Click **Send Request.** -1. Click **Close.** - -**Result:** The feature is enabled. - -### Disabling Features with the Rancher API - -1. Go to `/v3/features`. -1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. -1. In the upper left corner of the screen, under **Operations,** click **Edit.** -1. In the **Value** drop-down menu, click **False.** -1. Click **Show Request.** -1. Click **Send Request.** -1. Click **Close.** - -**Result:** The feature is disabled. diff --git a/content/rancher/v2.5/en/ecm/feature-flags/enable-not-default-storage-drivers/_index.md b/content/rancher/v2.5/en/ecm/feature-flags/enable-not-default-storage-drivers/_index.md deleted file mode 100644 index 6fcea1be92f..00000000000 --- a/content/rancher/v2.5/en/ecm/feature-flags/enable-not-default-storage-drivers/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Allow Unsupported Storage Drivers -weight: 1 ---- - -This feature allows you to use types for storage providers and provisioners that are not enabled by default. - -To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.x/en/installation/options/feature-flags/) - -Environment Variable Key | Default Value | Description ----|---|--- - `unsupported-storage-drivers` | `false` | This feature enables types for storage providers and provisioners that are not enabled by default. - -### Types for Persistent Volume Plugins that are Enabled by Default -Below is a list of storage types for persistent volume plugins that are enabled by default. 
When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: - -Name | Plugin ---------|---------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` -Network File System | `nfs` -hostPath | `host-path` - -### Types for StorageClass that are Enabled by Default -Below is a list of storage types for a StorageClass that are enabled by default. When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: - -Name | Plugin ---------|-------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/feature-flags/istio-virtual-service-ui/_index.md b/content/rancher/v2.5/en/ecm/feature-flags/istio-virtual-service-ui/_index.md deleted file mode 100644 index 5bf7ca6a58e..00000000000 --- a/content/rancher/v2.5/en/ecm/feature-flags/istio-virtual-service-ui/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: UI for Istio Virtual Services and Destination Rules -weight: 2 ---- - -This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. - -> **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup) in order to use the feature. 
- -To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.x/en/installation/options/feature-flags/) - -Environment Variable Key | Default Value | Status | Available as of ----|---|---|--- -`istio-virtual-service-ui` |`false` | Experimental | v2.3.0 -`istio-virtual-service-ui` | `true` | GA | v2.3.2 - -# About this Feature - -A central advantage of Istio's traffic management features is that they allow dynamic request routing, which is useful for canary deployments, blue/green deployments, or A/B testing. - -When enabled, this feature turns on a page that lets you configure some traffic management features of Istio using the Rancher UI. Without this feature, you need to use `kubectl` to manage traffic with Istio. - -The feature enables two UI tabs: one tab for **Virtual Services** and another for **Destination Rules.** - -- **Virtual services** intercept and direct traffic to your Kubernetes services, allowing you to direct percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) -- **Destination rules** serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule) - -To see these tabs, - -1. Go to the project view in Rancher and click **Resources > Istio.** -1. 
You will see tabs for **Traffic Graph,** which has the Kiali network visualization integrated into the UI, and **Traffic Metrics,** which shows metrics for the success rate and request volume of traffic to your services, among other metrics. Next to these tabs, you should see the tabs for **Virtual Services** and **Destination Rules.** \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/globaldns/_index.md b/content/rancher/v2.5/en/ecm/globaldns/_index.md deleted file mode 100644 index fe1a88a175c..00000000000 --- a/content/rancher/v2.5/en/ecm/globaldns/_index.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Global DNS -weight: 100 ---- - - > This page is under construction. - -Rancher's Global DNS feature provides a way to program an external DNS provider to route traffic to your Kubernetes applications. Since the DNS programming supports spanning applications across different Kubernetes clusters, Global DNS is configured at a global level. An application can become highly available as it allows you to have one application run on different Kubernetes clusters. If one of your Kubernetes clusters goes down, the application would still be accessible. - -> **Note:** Global DNS is only available in [Kubernetes installations]({{}}/rancher/v2.x/en/installation/k8s-install/) with the [`local` cluster enabled]({{}}/rancher/v2.x/en/installation/options/chart-options/#import-local-cluster). - -## Global DNS Providers - -Prior to adding in Global DNS entries, you will need to configure access to an external provider. - -The following table lists the first version of Rancher each provider debuted. - -| DNS Provider | Available as of | -| --- | --- | -| [AWS Route53](https://aws.amazon.com/route53/) | v2.2.0 | -| [CloudFlare](https://www.cloudflare.com/dns/) | v2.2.0 | -| [AliDNS](https://www.alibabacloud.com/product/dns) | v2.2.0 | - -## Global DNS Entries - -For each application that you want to route traffic to, you will need to create a Global DNS Entry. 
This entry will use a fully qualified domain name (a.k.a FQDN) from a global DNS provider to target applications. The applications can either resolve to a single [multi-cluster application]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or to specific projects. You must [add specific annotation labels](#adding-annotations-to-ingresses-to-program-the-external-dns) to the ingresses in order for traffic to be routed correctly to the applications. Without this annotation, the programming for the DNS entry will not work. - -## Permissions for Global DNS Providers/Entries - -By default, only [global administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) and the creator of the Global DNS provider or Global DNS entry have access to use, edit and delete them. When creating the provider or entry, the creator can add additional users in order for those users to access and manage them. By default, these members will get `Owner` role to manage them. - -## Setting up Global DNS for Applications - -### Add a Global DNS Provider - -1. From the **Global View**, select **Tools > Global DNS Providers**. -1. To add a provider, choose from the available provider options and configure the Global DNS Provider with necessary credentials and an optional domain. -1. (Optional) Add additional users so they could use the provider when creating Global DNS entries as well as manage the Global DNS provider. - -{{% accordion id="route53" label="Route53" %}} -1. Enter a **Name** for the provider. -1. (Optional) Enter the **Root Domain** of the hosted zone on AWS Route53. If this is not provided, Rancher's Global DNS Provider will work with all hosted zones that the AWS keys can access. -1. Enter the AWS **Access Key**. -1. Enter the AWS **Secret Key**. -1. Under **Member Access**, search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. -1. Click **Create**. 
-{{% /accordion %}} -{{% accordion id="cloudflare" label="CloudFlare" %}} -1. Enter a **Name** for the provider. -1. Enter the **Root Domain**, this field is optional, in case this is not provided, Rancher's Global DNS Provider will work with all domains that the keys can access. -1. Enter the CloudFlare **API Email**. -1. Enter the CloudFlare **API Key**. -1. Under **Member Access**, search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. -1. Click **Create**. -{{% /accordion %}} -{{% accordion id="alidns" label="AliDNS" %}} -1. Enter a **Name** for the provider. -1. Enter the **Root Domain**, this field is optional, in case this is not provided, Rancher's Global DNS Provider will work with all domains that the keys can access. -1. Enter the **Access Key**. -1. Enter the **Secret Key**. -1. Under **Member Access**, search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. -1. Click **Create**. - ->**Notes:** -> ->- Alibaba Cloud SDK uses TZ data. It needs to be present on `/usr/share/zoneinfo` path of the nodes running [`local` cluster]({{}}/rancher/v2.x/en/installation/options/chart-options/#import-local-cluster), and it is mounted to the external DNS pods. If it is not available on the nodes, please follow the [instruction](https://www.ietf.org/timezones/tzdb-2018f/tz-link.html) to prepare it. ->- Different versions of AliDNS have different allowable TTL range, where the default TTL for a global DNS entry may not be valid. Please see the [reference](https://www.alibabacloud.com/help/doc-detail/34338.htm) before adding an AliDNS entry. -{{% /accordion %}} - -### Add a Global DNS Entry - -1. From the **Global View**, select **Tools > Global DNS Entries**. -1. Click on **Add DNS Entry**. -1. Enter the **FQDN** you wish to program on the external DNS. -1. 
Select a Global DNS **Provider** from the list. -1. Select if this DNS entry will be for a [multi-cluster application]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or for workloads in different [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). You will need to ensure that [annotations are added to any ingresses](#adding-annotations-to-ingresses-to-program-the-external-dns) for the applications that you want to target. -1. Configure the **DNS TTL** value in seconds. By default, it will be 300 seconds. -1. Under **Member Access**, search for any users that you want to have the ability to manage this Global DNS entry. - -## Adding Annotations to Ingresses to program the External DNS - -In order for Global DNS entries to be programmed, you will need to add a specific annotation on an ingress in your application or target project and this ingress needs to use a specific `hostname` and an annotation that should match the FQDN of the Global DNS entry. - -1. For any application that you want targeted for your Global DNS entry, find an ingress associated with the application. -1. In order for the DNS to be programmed, the following requirements must be met: - * The ingress routing rule must be set to use a `hostname` that matches the FQDN of the Global DNS entry. - * The ingress must have an annotation (`rancher.io/globalDNS.hostname`) and the value of this annotation should match the FQDN of the Global DNS entry. -1. Once the ingress in your [multi-cluster application]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or in your target projects are in `active` state, the FQDN will be programmed on the external DNS against the Ingress IP addresses. - -## Editing a Global DNS Provider - -The [global administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), creator of the Global DNS provider and any users added as `members` to a Global DNS provider, have _owner_ access to that provider. 
Any members can edit the following fields: - -- Root Domain -- Access Key & Secret Key -- Members - -1. From the **Global View**, select **Tools > Global DNS Providers**. - -1. For the Global DNS provider that you want to edit, click the **⋮ > Edit**. - -## Editing a Global DNS Entry - -The [global administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), creator of the Global DNS entry and any users added as `members` to a Global DNS entry, have _owner_ access to that DNS entry. Any members can edit the following fields: - -- FQDN -- Global DNS Provider -- Target Projects or Multi-Cluster App -- DNS TTL -- Members - -Any users who can access the Global DNS entry can **only** add target projects that they have access to. However, users can remove **any** target project as there is no check to confirm if that user has access to the target project. - -Permission checks are relaxed for removing target projects in order to support situations where the user's permissions might have changed before they were able to delete the target project. Another use case could be that the target project was removed from the cluster before being removed from a target project of the Global DNS entry. - -1. From the **Global View**, select **Tools > Global DNS Entries**. - -1. For the Global DNS entry that you want to edit, click the **⋮ > Edit**. 
diff --git a/content/rancher/v2.5/en/ecm/infrastructure/_index.md b/content/rancher/v2.5/en/ecm/infrastructure/_index.md deleted file mode 100644 index 2512a53d2a1..00000000000 --- a/content/rancher/v2.5/en/ecm/infrastructure/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Infrastructure Management -weight: 14 ---- \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/infrastructure/cloud-credentials/_index.md b/content/rancher/v2.5/en/ecm/infrastructure/cloud-credentials/_index.md deleted file mode 100644 index a85ca3d3e06..00000000000 --- a/content/rancher/v2.5/en/ecm/infrastructure/cloud-credentials/_index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Managing Cloud Credentials -weight: 3 ---- - -When you create a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. - -Node templates can use cloud credentials to access the credential information required to provision nodes in the infrastructure providers. The same cloud credential can be used by multiple node templates. By using a cloud credential, you do not have to re-enter access keys for the same cloud provider. Cloud credentials are stored as Kubernetes secrets. - -Cloud credentials are only used by node templates if there are fields marked as `password`. The default `active` node drivers have their account access fields marked as `password`, but there may be some `inactive` node drivers, which are not using them yet. These node drivers will not use cloud credentials. - -You can create cloud credentials in two contexts: - -- [During creation of a node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for a cluster. 
-- In the **User Settings** - -All cloud credentials are bound to the user profile of who created it. They **cannot** be shared across users. - -## Creating a Cloud Credential from User Settings - -1. From your user settings, select **User Avatar > Cloud Credentials**. -1. Click **Add Cloud Credential**. -1. Enter a name for the cloud credential. -1. Select a **Cloud Credential Type** from the drop down. The values of this dropdown is based on the `active` [node drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) in Rancher. -1. Based on the selected cloud credential type, enter the required values to authenticate with the infrastructure provider. -1. Click **Create**. - -**Result:** The cloud credential is created and can immediately be used to [create node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). - -## Updating a Cloud Credential - -When access credentials are changed or compromised, updating a cloud credential allows you to rotate those credentials while keeping the same node template. - -1. From your user settings, select **User Avatar > Cloud Credentials**. -1. Choose the cloud credential you want to edit and click the **⋮ > Edit**. -1. Update the credential information and click **Save**. - -**Result:** The cloud credential is updated with the new access credentials. All existing node templates using this cloud credential will automatically use the updated information whenever [new nodes are added]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). - -## Deleting a Cloud Credential - -In order to delete cloud credentials, there must not be any node template associated with it. If you are unable to delete the cloud credential, [delete any node templates]({{}}/rancher/v2.x/en/user-settings/node-templates/#deleting-a-node-template) that are still associated to that cloud credential. - -1. From your user settings, select **User Avatar > Cloud Credentials**. -1. 
You can either individually delete a cloud credential or bulk delete. - - - To individually delete one, choose the cloud credential you want to edit and click the **⋮ > Delete**. - - To bulk delete cloud credentials, select one or more cloud credentials from the list. Click **Delete**. -1. Confirm that you want to delete these cloud credentials. diff --git a/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/_index.md b/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/_index.md deleted file mode 100644 index 4eb2f7ac57b..00000000000 --- a/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Setting up Cloud Providers -weight: 4 ---- - -A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes. For more information, refer to the [official Kubernetes documentation on cloud providers.](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) - -When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation. - -Your cluster will not provision correctly if you configure a cloud provider cluster of nodes that do not meet the prerequisites. - -By default, the **Cloud Provider** option is set to `None`. 
- -The following cloud providers can be enabled: - -* Amazon -* Azure -* GCE (Google Compute Engine) - -### Setting up the Amazon Cloud Provider - -For details on enabling the Amazon cloud provider, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/amazon) - -### Setting up the Azure Cloud Provider - -For details on enabling the Azure cloud provider, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/azure) - -### Setting up the GCE Cloud Provider - -For details on enabling the Google Compute Engine cloud provider, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/gce) - -### Setting up a Custom Cloud Provider - -The `Custom` cloud provider is available if you want to configure any [Kubernetes cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/). - -For the custom cloud provider option, you can refer to the [RKE docs]({{}}/rke/latest/en/config-options/cloud-providers/) on how to edit the yaml file for your specific cloud provider. 
There are specific cloud providers that have more detailed configuration : - -* [vSphere]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/) -* [Openstack]({{}}/rke/latest/en/config-options/cloud-providers/openstack/) \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/amazon/_index.md b/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/amazon/_index.md deleted file mode 100644 index 5a6fc9f45fb..00000000000 --- a/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/amazon/_index.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Setting up the Amazon Cloud Provider -weight: 1 ---- - -When using the `Amazon` cloud provider, you can leverage the following capabilities: - -- **Load Balancers:** Launches an AWS Elastic Load Balancer (ELB) when choosing `Layer-4 Load Balancer` in **Port Mapping** or when launching a `Service` with `type: LoadBalancer`. -- **Persistent Volumes**: Allows you to use AWS Elastic Block Stores (EBS) for persistent volumes. - -See [cloud-provider-aws README](https://github.com/kubernetes/cloud-provider-aws/blob/master/README.md) for all information regarding the Amazon cloud provider. - -To set up the Amazon cloud provider, - -1. [Create an IAM role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) -2. [Configure the ClusterID](#2-configure-the-clusterid) - -### 1. Create an IAM Role and attach to the instances - -All nodes added to the cluster must be able to interact with EC2 so that they can create and remove resources. You can enable this interaction by using an IAM role attached to the instance. See [Amazon documentation: Creating an IAM Role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role) how to create an IAM role. There are two example policies: - -* The first policy is for the nodes with the `controlplane` role. These nodes have to be able to create/remove EC2 resources. 
The following IAM policy is an example, please remove any unneeded permissions for your use case. -* The second policy is for the nodes with the `etcd` or `worker` role. These nodes only have to be able to retrieve information from EC2. - -While creating an [Amazon EC2 cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/#create-the-amazon-ec2-cluster), you must fill in the **IAM Instance Profile Name** (not ARN) of the created IAM role when creating the **Node Template**. - -While creating a [Custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes), you must manually attach the IAM role to the instance(s). - -IAM Policy for nodes with the `controlplane` role: - -```json -{ -"Version": "2012-10-17", -"Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - 
"elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } -] -} -``` - -IAM policy for nodes with the `etcd` or `worker` role: - -```json -{ -"Version": "2012-10-17", -"Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } -] -} -``` - -### 2. Configure the ClusterID - -The following resources need to tagged with a `ClusterID`: - -- **Nodes**: All hosts added in Rancher. -- **Subnet**: The subnet used for your cluster. -- **Security Group**: The security group used for your cluster. - ->**Note:** Do not tag multiple security groups. Tagging multiple groups generates an error when creating an Elastic Load Balancer (ELB). 
- -When you create an [Amazon EC2 Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/#create-the-amazon-ec2-cluster), the `ClusterID` is automatically configured for the created nodes. Other resources still need to be tagged manually. - -Use the following tag: - -**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `owned` - -`CLUSTERID` can be any string you like, as long as it is equal across all tags set. - -Setting the value of the tag to `owned` tells the cluster that all resources with this tag are owned and managed by this cluster. If you share resources between clusters, you can change the tag to: - -**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`. - -### Using Amazon Elastic Container Registry (ECR) - -The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs to be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/azure/_index.md b/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/azure/_index.md deleted file mode 100644 index 25884572579..00000000000 --- a/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/azure/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Setting up the Azure Cloud Provider -weight: 2 ---- - -When using the `Azure` cloud provider, you can leverage the following capabilities: - -- **Load Balancers:** Launches an Azure Load Balancer within a specific Network Security Group. 
- -- **Persistent Volumes:** Supports using Azure Blob disks and Azure Managed Disks with standard and premium storage accounts. - -- **Network Storage:** Support Azure Files via CIFS mounts. - -The following account types are not supported for Azure Subscriptions: - -- Single tenant accounts (i.e. accounts with no subscriptions). -- Multi-subscription accounts. - -To set up the Azure cloud provider following credentials need to be configured: - -1. [Set up the Azure Tenant ID](#1-set-up-the-azure-tenant-id) -2. [Set up the Azure Client ID and Azure Client Secret](#2-set-up-the-azure-client-id-and-azure-client-secret) -3. [Configure App Registration Permissions](#3-configure-app-registration-permissions) -4. [Set up Azure Network Security Group Name](#4-set-up-azure-network-security-group-name) - -### 1. Set up the Azure Tenant ID - -Visit [Azure portal](https://portal.azure.com), login and go to **Azure Active Directory** and select **Properties**. Your **Directory ID** is your **Tenant ID** (tenantID). - -If you want to use the Azure CLI, you can run the command `az account show` to get the information. - -### 2. Set up the Azure Client ID and Azure Client Secret - -Visit [Azure portal](https://portal.azure.com), login and follow the steps below to create an **App Registration** and the corresponding **Azure Client ID** (aadClientId) and **Azure Client Secret** (aadClientSecret). - -1. Select **Azure Active Directory**. -1. Select **App registrations**. -1. Select **New application registration**. -1. Choose a **Name**, select `Web app / API` as **Application Type** and a **Sign-on URL** which can be anything in this case. -1. Select **Create**. - -In the **App registrations** view, you should see your created App registration. The value shown in the column **APPLICATION ID** is what you need to use as **Azure Client ID**. - -The next step is to generate the **Azure Client Secret**: - -1. Open your created App registration. -1. 
In the **Settings** view, open **Keys**. -1. Enter a **Key description**, select an expiration time and select **Save**. -1. The generated value shown in the column **Value** is what you need to use as **Azure Client Secret**. This value will only be shown once. - -### 3. Configure App Registration Permissions - -The last thing you will need to do, is assign the appropriate permissions to your App registration. - -1. Go to **More services**, search for **Subscriptions** and open it. -1. Open **Access control (IAM)**. -1. Select **Add**. -1. For **Role**, select `Contributor`. -1. For **Select**, select your created App registration name. -1. Select **Save**. - -### 4. Set up Azure Network Security Group Name - -A custom Azure Network Security Group (securityGroupName) is needed to allow Azure Load Balancers to work. - -If you provision hosts using Rancher Machine Azure driver, you will need to edit them manually to assign them to this Network Security Group. - -You should already assign custom hosts to this Network Security Group during provisioning. - -Only hosts expected to be load balancer back ends need to be in this group. diff --git a/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/gce/_index.md b/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/gce/_index.md deleted file mode 100644 index 000b537c110..00000000000 --- a/content/rancher/v2.5/en/ecm/infrastructure/cloud-providers/gce/_index.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Setting up the Google Compute Engine Cloud Provider -weight: 3 ---- - -In this section, you'll learn how to enable the Google Compute Engine (GCE) cloud provider for custom clusters in Rancher. A custom cluster is one in which Rancher installs Kubernetes on existing nodes. 
- -The official Kubernetes documentation for the GCE cloud provider is [here.](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#gce) - -> **Prerequisites:** The service account of `Identity and API` access on GCE needs the `Computer Admin` permission. - -If you are using Calico, - -1. Go to the cluster view in the Rancher UI, and click **⋮ > Edit.** -1. Click **Edit as YAML,** and enter the following configuration: - - ``` - rancher_kubernetes_engine_config: - cloud_provider: - name: gce - customCloudProvider: |- - [Global] - project-id= - network-name= - subnetwork-name= - node-instance-prefix= - node-tags= - network: - options: - calico_cloud_provider: "gce" - plugin: "calico" - ``` - -If you are using Canal or Flannel, - -1. Go to the cluster view in the Rancher UI, and click **⋮ > Edit.** -1. Click **Edit as YAML,** and enter the following configuration: - - ``` - rancher_kubernetes_engine_config: - cloud_provider: - name: gce - customCloudProvider: |- - [Global] - project-id= - network-name= - subnetwork-name= - node-instance-prefix= - node-tags= - services: - kube_controller: - extra_args: - configure-cloud-routes: true # we need to allow the cloud provider configure the routes for the hosts - ``` \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/infrastructure/node-templates/_index.md b/content/rancher/v2.5/en/ecm/infrastructure/node-templates/_index.md deleted file mode 100644 index 2ffe51ba190..00000000000 --- a/content/rancher/v2.5/en/ecm/infrastructure/node-templates/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Managing Node Templates -weight: 2 ---- - -When you provision a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. 
These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts: - -- While [provisioning a node pool cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). -- At any time, from your [user settings](#creating-a-node-template-from-user-settings). - -When you create a node template, it is bound to your user profile. Node templates cannot be shared among users. You can delete stale node templates that you no longer use from your user settings. - -## Creating a Node Template from User Settings - -1. From your user settings, select **User Avatar > Node Templates**. -1. Click **Add Template**. -1. Select one of the cloud providers available. Then follow the instructions on screen to configure the template. - -**Result:** The template is configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). - -## Updating a Node Template - -1. From your user settings, select **User Avatar > Node Templates**. -1. Choose the node template that you want to edit and click the **⋮ > Edit**. - - > **Note:** As of v2.2.0, the default `active` [node drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use [cloud credentials]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#cloud-credentials). If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. - -1. Edit the required information and click **Save**. - -**Result:** The node template is updated. All node pools using this node template will automatically use the updated information when new nodes are added. 
- -## Cloning Node Templates - -When creating new node templates from your user settings, you can clone an existing template and quickly update its settings rather than creating a new one from scratch. Cloning templates saves you the hassle of re-entering access keys for the cloud provider. - -1. From your user settings, select **User Avatar > Node Templates**. -1. Find the template you want to clone. Then select **⋮ > Clone**. -1. Complete the rest of the form. - -**Result:** The template is cloned and configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). - -## Deleting a Node Template - -When you no longer use a node template, you can delete it from your user settings. - -1. From your user settings, select **User Avatar > Node Templates**. -1. Select one or more template from the list. Then click **Delete**. Confirm the delete when prompted. diff --git a/content/rancher/v2.5/en/ecm/infrastructure/nodes/_index.md b/content/rancher/v2.5/en/ecm/infrastructure/nodes/_index.md deleted file mode 100644 index 21b7b49284d..00000000000 --- a/content/rancher/v2.5/en/ecm/infrastructure/nodes/_index.md +++ /dev/null @@ -1,224 +0,0 @@ ---- -title: Nodes and Node Pools -weight: 1 ---- - -After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) to provision the cluster, there are different node options available. - -> If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters). 
- -This section covers the following topics: - -- [Node options available for each cluster creation option](#node-options-available-for-each-cluster-creation-option) - - [Nodes hosted by an infrastructure provider](#nodes-hosted-by-an-infrastructure-provider) - - [Nodes provisioned by hosted Kubernetes providers](#nodes-provisioned-by-hosted-kubernetes-providers) - - [Imported nodes](#imported-nodes) -- [Managing and editing individual nodes](#managing-and-editing-individual-nodes) -- [Viewing a node in the Rancher API](#viewing-a-node-in-the-rancher-api) -- [Deleting a node](#deleting-a-node) -- [Scaling nodes](#scaling-nodes) -- [SSH into a node hosted by an infrastructure provider](#ssh-into-a-node-hosted-by-an-infrastructure-provider) -- [Cordoning a node](#cordoning-a-node) -- [Draining a node](#draining-a-node) - - [Aggressive and safe draining options](#aggressive-and-safe-draining-options) - - [Grace period](#grace-period) - - [Timeout](#timeout) - - [Drained and cordoned state](#drained-and-cordoned-state) -- [Labeling a node to be ignored by Rancher](#labeling-a-node-to-be-ignored-by-rancher) - -# Node Options Available for Each Cluster Creation Option - -The following table lists which node options are available for each [type of cluster]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-options) in Rancher. Click the links in the **Option** column for more detailed information about each feature. - -| Option | [Nodes Hosted by an Infrastructure Provider][1] | [Custom Node][2] | [Hosted Cluster][3] | [Imported Nodes][4] | Description | -| ------------------------------------------------ | ------------------------------------------------ | ---------------- | ------------------- | ------------------- | ------------------------------------------------------------------ | -| [Cordon](#cordoning-a-node) | ✓ | ✓ | ✓ | | Marks the node as unschedulable. 
| -| [Drain](#draining-a-node) | ✓ | ✓ | ✓ | | Marks the node as unschedulable _and_ evicts all pods. | -| [Edit](#managing-and-editing-individual-nodes) | ✓ | ✓ | ✓ | | Enter a custom name, description, label, or taints for a node. | -| [View API](#viewing-a-node-in-the-rancher-api) | ✓ | ✓ | ✓ | | View API data. | -| [Delete](#deleting-a-node) | ✓ | ✓ | | | Deletes defective nodes from the cluster. | -| [Download Keys](#ssh-into-a-node-hosted-by-an-infrastructure-provider) | ✓ | | | | Download SSH key for in order to SSH into the node. | -| [Node Scaling](#scaling-nodes) | ✓ | | | | Scale the number of nodes in the node pool up or down. | - -[1]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/ - -### Nodes Hosted by an Infrastructure Provider - -Node pools are available when you provision Rancher-launched Kubernetes clusters on nodes that are [hosted in an infrastructure provider.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) - -Clusters provisioned using [one of the node pool options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) can be scaled up or down if the node pool is edited. - -A node pool can also automatically maintain the node scale that's set during the initial cluster provisioning if [node auto-replace is enabled.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-auto-replace) This scale determines the number of active nodes that Rancher maintains for the cluster. - -Rancher uses [node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to replace nodes in the node pool. Each node template uses cloud provider credentials to allow Rancher to set up the node in the infrastructure provider. 
- -### Nodes Provisioned by Hosted Kubernetes Providers - -Options for managing nodes [hosted by a Kubernetes provider]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. - -### Imported Nodes - -Although you can deploy workloads to an [imported cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. - -# Managing and Editing Individual Nodes - -Editing a node lets you: - -* Change its name -* Change its description -* Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) -* Add/Remove [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) - -To manage individual nodes, browse to the cluster that you want to manage and then select **Nodes** from the main menu. You can open the options menu for a node by clicking its **⋮** icon (**...**). - -# Viewing a Node in the Rancher API - -Select this option to view the node's [API endpoints]({{< baseurl >}}/rancher/v2.x/en/api/). - -# Deleting a Node - -Use **Delete** to remove defective nodes from the cloud provider. - -When you delete a defective node, Rancher can automatically replace it with an identically provisioned node if the node is in a node pool and [node auto-replace is enabled.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-auto-replace) - ->**Tip:** If your cluster is hosted by an infrastructure provider, and you want to scale your cluster down instead of deleting a defective node, [scale down](#scaling-nodes) rather than delete. 
- -# Scaling Nodes - -For nodes hosted by an infrastructure provider, you can scale the number of nodes in each [node pool]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) by using the scale controls. This option isn't available for other cluster types. - -# SSH into a Node Hosted by an Infrastructure Provider - -For [nodes hosted by an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. - -1. From the cluster hosted by an infrastructure provider, select **Nodes** from the main menu. - -1. Find the node that you want to remote into. Select **⋮ > Download Keys**. - - **Step Result:** A ZIP file containing files used for SSH is downloaded. - -1. Extract the ZIP file to any location. - -1. Open Terminal. Change your location to the extracted ZIP file. - -1. Enter the following command: - - ``` - ssh -i id_rsa root@ - ``` - -# Cordoning a Node - -_Cordoning_ a node marks it as unschedulable. This feature is useful for performing short tasks on the node during small maintenance windows, like reboots, upgrades, or decommissions. When you're done, power back on and make the node schedulable again by uncordoning it. - -# Draining a Node - -_Draining_ is the process of first cordoning the node, and then evicting all its pods. This feature is useful for performing node maintenance (like kernel upgrades or hardware maintenance). It prevents new pods from deploying to the node while redistributing existing pods so that users don't experience service interruption. - -- For pods with a replica set, the pod is replaced by a new pod that will be scheduled to a new node. Additionally, if the pod is part of a service, then clients will automatically be redirected to the new pod. 
- -- For pods with no replica set, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it. - -You can drain nodes that are in either a `cordoned` or `active` state. When you drain a node, the node is cordoned, the nodes are evaluated for conditions they must meet to be drained, and then (if it meets the conditions) the node evicts its pods. - -However, you can override the conditions draining when you initiate the drain. You're also given an opportunity to set a grace period and timeout value. - -### Aggressive and Safe Draining Options - -The node draining options are different based on your version of Rancher. - -{{% tabs %}} -{{% tab "Rancher v2.2.x+" %}} -There are two drain modes: aggressive and safe. - -- **Aggressive Mode** - - In this mode, pods won't get rescheduled to a new node, even if they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods. - - Kubernetes also expects the implementation to decide what to do with pods using emptyDir. If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Choosing aggressive mode will delete these pods. - -- **Safe Mode** - - If a node has standalone pods or ephemeral data it will be cordoned but not drained. -{{% /tab %}} -{{% tab "Rancher prior to v2.2.x" %}} - -The following list describes each drain option: - -- **Even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet** - - These types of pods won't get rescheduled to a new node, since they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods. Kubernetes forces you to choose this option (which will delete/evict these pods) or drain won't proceed. 
- -- **Even if there are DaemonSet-managed pods** - - Similar to above, if you have any daemonsets, drain would proceed only if this option is selected. Even when this option is on, pods won't be deleted since they'll immediately be replaced. On startup, Rancher currently has a few daemonsets running by default in the system, so this option is turned on by default. - -- **Even if there are pods using emptyDir** - - If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Similar to the first option, Kubernetes expects the implementation to decide what to do with these pods. Choosing this option will delete these pods. -{{% /tab %}} -{{% /tabs %}} - -### Grace Period - -The timeout given to each pod for cleaning things up, so they will have a chance to exit gracefully. For example, when pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If negative, the default value specified in the pod will be used. - -### Timeout - -The amount of time drain should continue to wait before giving up. - ->**Kubernetes Known Issue:** The [timeout setting](https://github.com/kubernetes/kubernetes/pull/64378) was not enforced while draining a node prior to Kubernetes 1.12. - -### Drained and Cordoned State - -If there's any error related to user input, the node enters a `cordoned` state because the drain failed. You can either correct the input and attempt to drain the node again, or you can abort by uncordoning the node. - -If the drain continues without error, the node enters a `draining` state. You'll have the option to stop the drain when the node is in this state, which will stop the drain process and change the node's state to `cordoned`. - -Once drain successfully completes, the node will be in a state of `drained`. You can then power off or delete the node. 
- ->**Want to know more about cordon and drain?** See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/#maintenance-on-a-node). - -# Labeling a Node to be Ignored by Rancher - -Some solutions, such as F5's BIG-IP integration, may require creating a node that is never registered to a cluster. - -Since the node will never finish registering, it will always be shown as unhealthy in the Rancher UI. - -In that case, you may want to label the node to be ignored by Rancher so that Rancher only shows nodes as unhealthy when they are actually failing. - -You can label nodes to be ignored by using a setting in the Rancher UI, or by using `kubectl`. - -> **Note:** There is an [open issue](https://github.com/rancher/rancher/issues/24172) in which nodes labeled to be ignored can get stuck in an updating state. - -### Labeling Nodes to be Ignored with the Rancher UI - -To add a node that is ignored by Rancher, - -1. From the **Global** view, click the **Settings** tab. -1. Go to the `ignore-node-name` setting and click **⋮ > Edit.** -1. Enter a name that Rancher will use to ignore nodes. All nodes with this name will be ignored. -1. Click **Save.** - -**Result:** Rancher will not wait to register nodes with this name. In the UI, the node will be displayed with a grayed-out status. The node is still part of the cluster and can be listed with `kubectl`. - -If the setting is changed afterward, the ignored nodes will continue to be hidden. - -### Labeling Nodes to be Ignored with kubectl - -To add a node that will be ignored by Rancher, use `kubectl` to create a node that has the following label: - -``` -cattle.rancher.io/node-status: ignore -``` - -**Result:** If you add the node to a cluster, Rancher will not attempt to sync with this node. The node can still be part of the cluster and can be listed with `kubectl`. - -If the label is added before the node is added to the cluster, the node will not be shown in the Rancher UI. 
- -If the label is added after the node is added to a Rancher cluster, the node will not be removed from the UI. - -If you delete the node from the Rancher server using the Rancher UI or API, the node will not be removed from the cluster if the `nodeName` is listed in the Rancher settings under `ignore-node-name`. diff --git a/content/rancher/v2.5/en/ecm/k8s-metadata/_index.md b/content/rancher/v2.5/en/ecm/k8s-metadata/_index.md deleted file mode 100644 index 990ed730a2a..00000000000 --- a/content/rancher/v2.5/en/ecm/k8s-metadata/_index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Upgrading Kubernetes without Upgrading Rancher -weight: 13 ---- - -The RKE metadata feature allows you to provision clusters with new versions of Kubernetes as soon as they are released, without upgrading Rancher. This feature is useful for taking advantage of patch versions of Kubernetes, for example, if you want to upgrade to Kubernetes v1.14.7 when your Rancher server originally supported v1.14.6. - -> **Note:** The Kubernetes API can change between minor versions. Therefore, we don't support introducing minor Kubernetes versions, such as introducing v1.15 when Rancher currently supports v1.14. You would need to upgrade Rancher to add support for minor Kubernetes versions. - -Rancher's Kubernetes metadata contains information specific to the Kubernetes version that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). Rancher syncs the data periodically and creates custom resource definitions (CRDs) for **system images,** **service options** and **addon templates.** Consequently, when a new Kubernetes version is compatible with the Rancher server version, the Kubernetes metadata makes the new version available to Rancher for provisioning clusters. The metadata gives you an overview of the information that the [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE) uses for deploying various Kubernetes versions. 
- -This table below describes the CRDs that are affected by the periodic data sync. - -> **Note:** Only administrators can edit metadata CRDs. It is recommended not to update existing objects unless explicitly advised. - -| Resource | Description | Rancher API URL | -|----------|-------------|-----------------| -| System Images | List of system images used to deploy Kubernetes through RKE. | `/v3/rkek8ssystemimages` | -| Service Options | Default options passed to Kubernetes components like `kube-api`, `scheduler`, `kubelet`, `kube-proxy`, and `kube-controller-manager` | `/v3/rkek8sserviceoptions` | -| Addon Templates | YAML definitions used to deploy addon components like Canal, Calico, Flannel, Weave, Kube-dns, CoreDNS, `metrics-server`, `nginx-ingress` | `/v3/rkeaddons` | - -Administrators might configure the RKE metadata settings to do the following: - -- Refresh the Kubernetes metadata, if a new patch version of Kubernetes comes out and they want Rancher to provision clusters with the latest version of Kubernetes without having to upgrade Rancher -- Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub -- Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher - -### Refresh Kubernetes Metadata - -The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) - -To force Rancher to refresh the Kubernetes metadata, a manual refresh action is available under **Tools > Drivers > Refresh Kubernetes Metadata** on the right side corner. - -### Configuring the Metadata Synchronization - -> Only administrators can change these settings. 
- -The RKE metadata config controls how often Rancher syncs metadata and where it downloads data from. You can configure the metadata from the settings in the Rancher UI, or through the Rancher API at the endpoint `v3/settings/rke-metadata-config`. - -The way that the metadata is configured depends on the Rancher version. - -{{% tabs %}} -{{% tab "Rancher v2.4+" %}} -To edit the metadata config in Rancher, - -1. Go to the **Global** view and click the **Settings** tab. -1. Go to the **rke-metadata-config** section. Click the **⋮** and click **Edit.** -1. You can optionally fill in the following parameters: - - - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. - - `url`: This is the HTTP path that Rancher fetches data from. The path must be a direct path to a JSON file. For example, the default URL for Rancher v2.4 is `https://releases.rancher.com/kontainer-driver-metadata/release-v2.4/data.json`. - -If you don't have an air gap setup, you don't need to specify the URL where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata/blob/dev-v2.5/data/data.json) - -However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. -{{% /tab %}} -{{% tab "Rancher v2.3" %}} -To edit the metadata config in Rancher, - -1. Go to the **Global** view and click the **Settings** tab. -1. Go to the **rke-metadata-config** section. Click the **⋮** and click **Edit.** -1. You can optionally fill in the following parameters: - - - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. 
- - `url`: This is the HTTP path that Rancher fetches data from. - - `branch`: This refers to the Git branch name if the URL is a Git URL. - -If you don't have an air gap setup, you don't need to specify the URL or Git branch where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata.git) - -However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL and Git branch in the `rke-metadata-config` settings to point to the new location of the repository. -{{% /tab %}} -{{% /tabs %}} - -### Air Gap Setups - -Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) - -If you have an air gap setup, you might not be able to get the automatic periodic refresh of the Kubernetes metadata from Rancher's Git repository. In that case, you should disable the periodic refresh to prevent your logs from showing errors. Optionally, you can configure your metadata settings so that Rancher can sync with a local copy of the RKE metadata. - -To sync Rancher with a local mirror of the RKE metadata, an administrator would configure the `rke-metadata-config` settings to point to the mirror. For details, refer to [Configuring the Metadata Synchronization.](#configuring-the-metadata-synchronization) - -After new Kubernetes versions are loaded into the Rancher setup, additional steps would be required in order to use them for launching clusters. Rancher needs access to updated system images. 
While the metadata settings can only be changed by administrators, any user can download the Rancher system images and prepare a private Docker registry for them. - -1. To download the system images for the private registry, click the Rancher server version at the bottom left corner of the Rancher UI. -1. Download the OS specific image lists for Linux or Windows. -1. Download `rancher-images.txt`. -1. Prepare the private registry using the same steps during the [air gap install]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. - -**Result:** The air gap installation of Rancher can now sync the Kubernetes metadata. If you update your private registry when new versions of Kubernetes are released, you can provision clusters with the new version without having to upgrade Rancher. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/pod-security-policies-in-projects/_index.md b/content/rancher/v2.5/en/ecm/pod-security-policies-in-projects/_index.md deleted file mode 100644 index af8ff997503..00000000000 --- a/content/rancher/v2.5/en/ecm/pod-security-policies-in-projects/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Pod Security Policies in Projects -weight: 5600 ---- - -> These cluster options are only available for [clusters in which Rancher has launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation. - -### Prerequisites - -- Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/). 
-- Assign a default Pod Security Policy to the project's cluster. You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [Existing Cluster: Adding a Pod Security Policy]({{}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters/#adding-changing-a-pod-security-policy). - -### Applying a Pod Security Policy - -1. From the **Global** view, find the cluster containing the project you want to apply a PSP to. -1. From the main menu, select **Projects/Namespaces**. -1. Find the project that you want to add a PSP to. From that project, select **⋮ > Edit**. -1. From the **Pod Security Policy** drop-down, select the PSP you want to apply to the project. - Assigning a PSP to a project will: - - - Override the cluster's default PSP. - - Apply the PSP to the project. - - Apply the PSP to any namespaces you add to the project later. - -1. Click **Save**. - -**Result:** The PSP is applied to the project and any namespaces added to the project. - ->**Note:** Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked to determine if they comply with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/pod-security-policies/_index.md b/content/rancher/v2.5/en/ecm/pod-security-policies/_index.md deleted file mode 100644 index 9b064ecf94f..00000000000 --- a/content/rancher/v2.5/en/ecm/pod-security-policies/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Adding a Pod Security Policy -weight: 5 ---- - -_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification (like root privileges). If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message of `Pod is forbidden: unable to validate...`. 
- -> **Note:** Assigning Pod Security Policies is only available for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) - -- You can assign PSPs at the cluster or project level. -- PSPs work through inheritance. - - - By default, PSPs assigned to a cluster are inherited by its projects, as well as any namespaces added to those projects. - - **Exception:** Namespaces that are not assigned to projects do not inherit PSPs, regardless of whether the PSP is assigned to a cluster or project. Because these namespaces have no PSPs, workload deployments to these namespaces will fail, which is the default Kubernetes behavior. - - You can override the default PSP by assigning a different PSP directly to the project. -- Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked to determine if they comply with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. - ->**Note:** You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster.]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) - -Read more about Pod Security Policies in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). - ->**Best Practice:** Set pod security at the cluster level. - -Using Rancher, you can create a Pod Security Policy using our GUI rather than creating a YAML file. - -## Default Pod Security Policies - -Rancher ships with two default Pod Security Policies (PSPs): the `restricted` and `unrestricted` policies. - -- `restricted` - - This policy is based on the Kubernetes [example restricted policy](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/restricted-psp.yaml). It significantly restricts what types of pods can be deployed to a cluster or project.
This policy: - - - Prevents pods from running as a privileged user and prevents escalation of privileges. - - Validates that server-required security mechanisms are in place (such as restricting what volumes can be mounted to only the core volume types and preventing root supplemental groups from being added). - -- `unrestricted` - - This policy is equivalent to running Kubernetes with the PSP controller disabled. It has no restrictions on what pods can be deployed into a cluster or project. - -## Creating Pod Security Policies - -1. From the **Global** view, select **Security** > **Pod Security Policies** from the main menu. Then click **Add Policy**. - - **Step Result:** The **Add Policy** form opens. - -2. Name the policy. - -3. Complete each section of the form. Refer to the Kubernetes documentation linked below for more information on what each policy does. - - - Basic Policies: - - - [Privilege Escalation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#privilege-escalation) - - [Host Namespaces][2] - - [Read Only Root Filesystems][1] - - - [Capability Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#capabilities) - - [Volume Policy][1] - - [Allowed Host Paths Policy][1] - - [FS Group Policy][1] - - [Host Ports Policy][2] - - [Run As User Policy][3] - - [SELinux Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#selinux) - - [Supplemental Groups Policy][3] - -### What's Next? 
- -You can add a Pod Security Policy (PSPs hereafter) in the following contexts: - -- [When creating a cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/) -- [When editing an existing cluster]({{}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters/) -- [When creating a project]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#creating-a-project/) -- [When editing an existing project]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/editing-projects/) - -> **Note:** We recommend adding PSPs during cluster and project creation instead of adding it to an existing one. - - - - -[1]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#volumes-and-file-systems -[2]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces -[3]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups diff --git a/content/rancher/v2.5/en/ecm/projects-and-namespaces/_index.md b/content/rancher/v2.5/en/ecm/projects-and-namespaces/_index.md deleted file mode 100644 index e7125947fca..00000000000 --- a/content/rancher/v2.5/en/ecm/projects-and-namespaces/_index.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Projects and Kubernetes Namespaces with Rancher -description: Rancher Projects ease the administrative burden of your cluster and support multi-tenancy. Learn to create projects and divide projects into Kubernetes namespaces -weight: 200 ---- - -A namespace is a Kubernetes concept that allows a virtual cluster within a cluster, which is useful for dividing the cluster into separate "virtual clusters" that each have their own access control and resource quotas. - -A project is a group of namespaces, and it is a concept introduced by Rancher. Projects allow you to manage multiple namespaces as a group and perform Kubernetes operations in them. 
You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. - -This section describes how projects and namespaces work with Rancher. It covers the following topics: - -- [About namespaces](#about-namespaces) -- [About projects](#about-projects) - - [The cluster's default project](#the-cluster-s-default-project) - - [The system project](#the-system-project) -- [Project authorization](#project-authorization) -- [Pod security policies](#pod-security-policies) -- [Creating projects](#creating-projects) -- [Switching between clusters and projects](#switching-between-clusters-and-projects) - -# About Namespaces - -A namespace is a concept introduced by Kubernetes. According to the [official Kubernetes documentation on namespaces,](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) - -> Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called namespaces. [...] Namespaces are intended for use in environments with many users spread across multiple teams, or projects. For clusters with a few to tens of users, you should not need to create or think about namespaces at all. - -Namespaces provide the following functionality: - -- **Providing a scope for names:** Names of resources need to be unique within a namespace, but not across namespaces. Namespaces can not be nested inside one another and each Kubernetes resource can only be in one namespace. -- **Resource quotas:** Namespaces provide a way to divide cluster resources between multiple users. - -You can assign resources at the project level so that each namespace in the project can use them. You can also bypass this inheritance by assigning resources explicitly to a namespace. 
- -You can assign the following resources directly to namespaces: - -- [Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/) -- [Load Balancers/Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/) -- [Service Discovery Records]({{}}/rancher/v2.x/en/k8s-in-rancher/service-discovery/) -- [Persistent Volume Claims]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) -- [Certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/) -- [ConfigMaps]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/) -- [Registries]({{}}/rancher/v2.x/en/k8s-in-rancher/registries/) -- [Secrets]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/) - -To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. - -For more information on creating and moving namespaces, see [Namespaces]({{}}/rancher/v2.x/en/project-admin/namespaces/). - -### Role-based access control issues with namespaces and kubectl - -Because projects are a concept introduced by Rancher, kubectl does not have the capability to restrict the creation of namespaces to a project the creator has access to. - -This means that when standard users with project-scoped permissions create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require the new namespace to be scoped within a certain project. - -If your permissions are restricted to the project level, it is better to [create a namespace through Rancher]({{}}/rancher/v2.x/en/project-admin/namespaces/#creating-namespaces) to ensure that you will have permission to access the namespace. - -If a standard user is a project owner, the user will be able to create namespaces within that project.
The Rancher UI will prevent that user from creating namespaces outside the scope of the projects they have access to. - -# About Projects - -In terms of hierarchy: - -- Clusters contain projects -- Projects contain namespaces - -You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. - -In the base version of Kubernetes, features like role-based access rights or cluster resources are assigned to individual namespaces. A project allows you to save time by giving an individual or a team access to multiple namespaces simultaneously. - -You can use projects to perform actions such as: - -- Assign users to a group of namespaces (i.e., [project membership]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members)). -- Assign users specific roles in a project. A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). -- Assign resources to the project. -- Assign Pod Security Policies. - -When you create a cluster, two projects are automatically created within it: - -- [Default Project](#the-cluster-s-default-project) -- [System Project](#the-system-project) - -### The Cluster's Default Project - -When you provision a cluster with Rancher, it automatically creates a `default` project for the cluster. This is a project you can use to get started with your cluster, but you can always delete it and replace it with projects that have more descriptive names. - -If you don't have a need for more than the default namespace, you also do not need more than the **Default** project in Rancher. - -If you require another level of organization beyond the **Default** project, you can create more projects in Rancher to isolate namespaces, applications and resources. 
- -### The System Project - -When troubleshooting, you can view the `system` project to check if important namespaces in the Kubernetes system are working properly. This easily accessible project saves you from troubleshooting individual system namespace containers. - -To open it, open the **Global** menu, and then select the `system` project for your cluster. - -The `system` project: - -- Is automatically created when you provision a cluster. -- Lists all namespaces that exist in `v3/settings/system-namespaces`, if they exist. -- Allows you to add more namespaces or move its namespaces to other projects. -- Cannot be deleted because it's required for cluster operations. - ->**Note:** In clusters where both: -> -> - The [Canal network plug-in]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#canal) is in use. -> - The Project Network Isolation option is enabled. -> ->The `system` project overrides the Project Network Isolation option so that it can communicate with other projects, collect logs, and check health. - -# Project Authorization - -Standard users are only authorized for project access in two situations: - -- An administrator, cluster owner or cluster member explicitly adds the standard user to the project's **Members** tab. -- Standard users can access projects that they create themselves. - -# Pod Security Policies - -Rancher extends Kubernetes to allow the application of [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) at the [project level]({{}}/rancher/v2.x/en/project-admin/pod-security-policies) in addition to the [cluster level.](../pod-security-policy) However, as a best practice, we recommend applying Pod Security Policies at the cluster level. - -# Creating Projects - -This section describes how to create a new project with a name and with optional pod security policy, members, and resource quotas. - -1. [Name a new project.](#1-name-a-new-project) -2. 
[Optional: Select a pod security policy.](#2-optional-select-a-pod-security-policy) -3. [Recommended: Add project members.](#3-recommended-add-project-members) -4. [Optional: Add resource quotas.](#4-optional-add-resource-quotas) - -### 1. Name a New Project - -1. From the **Global** view, choose **Clusters** from the main menu. From the **Clusters** page, open the cluster from which you want to create a project. - -1. From the main menu, choose **Projects/Namespaces**. Then click **Add Project**. - -1. Enter a **Project Name**. - -### 2. Optional: Select a Pod Security Policy - -This option is only available if you've already created a Pod Security Policy. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/). - -Assigning a PSP to a project will: - -- Override the cluster's default PSP. -- Apply the PSP to the project. -- Apply the PSP to any namespaces you add to the project later. - -### 3. Recommended: Add Project Members - -Use the **Members** section to provide other users with project access and roles. - -By default, your user is added as the project `Owner`. - ->**Notes on Permissions:** -> ->- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. ->- Choose `Custom` to create a custom role on the fly: [Custom Project Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#custom-project-roles). - -To add members: - -1. Click **Add Member**. -1. From the **Name** combo box, search for a user or group that you want to assign project access. 
Note: You can only search for groups if external authentication is enabled. -1. From the **Role** drop-down, choose a role. For more information, refer to the [documentation on project roles.]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) - -### 4. Optional: Add Resource Quotas - -Resource quotas limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas). - -To add a resource quota, - -1. Click **Add Quota**. -1. Select a [Resource Type]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#resource-quota-types). -1. Enter values for the **Project Limit** and the **Namespace Default Limit**. -1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden on an individual namespace or container level. For more information, see [Container Default Resource Limit]({{}}/rancher/v2.x/en/project-admin/resource-quotas/#setting-container-default-resource-limit) Note: This option is available as of v2.2.0. -1. Click **Create**. - -**Result:** Your project is created. You can view it from the cluster's **Projects/Namespaces** view. - -| Field | Description | -| ----------------------- | -------------------------------------------------------------------------------------------------------- | -| Project Limit | The overall resource limit for the project. | -| Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project when created. The combined limit of all project namespaces shouldn't exceed the project limit. 
| - -# Switching between Clusters and Projects - -To switch between clusters and projects, use the **Global** drop-down available in the main menu. - -![Global Menu]({{}}/img/rancher/global-menu.png) - -Alternatively, you can switch between projects and clusters using the main menu. - -- To switch between clusters, open the **Global** view and select **Clusters** from the main menu. Then open a cluster. -- To switch between projects, open a cluster, and then select **Projects/Namespaces** from the main menu. Select the link for the project that you want to open. diff --git a/content/rancher/v2.5/en/ecm/requirements/_index.md b/content/rancher/v2.5/en/ecm/requirements/_index.md deleted file mode 100644 index ab08b4e87b5..00000000000 --- a/content/rancher/v2.5/en/ecm/requirements/_index.md +++ /dev/null @@ -1,314 +0,0 @@ ---- -title: Installation Requirements -description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup -weight: 2 ---- - -> This section is under construction. - -This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. - -> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) which will run your apps and services. 
- -Make sure the node(s) for the Rancher server fulfill the following requirements: - -- [Operating Systems and Docker Requirements](#operating-systems-and-docker-requirements) -- [Hardware Requirements](#hardware-requirements) - - [CPU and Memory](#cpu-and-memory) - - [CPU and Memory for Rancher prior to v2.4.0](#cpu-and-memory-for-rancher-prior-to-v2-4-0) - - [Disks](#disks) -- [Networking Requirements](#networking-requirements) - - [Node IP Addresses](#node-ip-addresses) - - [Port Requirements](#port-requirements) - -For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.]({{}}/rancher/v2.x/en/best-practices/deployment-types/) - -The Rancher UI works best in Firefox or Chrome. - -# Operating Systems and Container Runtime Requirements - -Rancher should work with any modern Linux distribution. - -For the container runtime, RKE should work with any modern Docker version, while K3s should work with any modern version of Docker or containerd. - -Rancher and RKE have been tested and are supported on Ubuntu, CentOS, Oracle Linux, RancherOS, and RedHat Enterprise Linux. - -K3s should run on just about any flavor of Linux. However, K3s is tested on the following operating systems and their subsequent non-major releases: - -- Ubuntu 16.04 (amd64) -- Ubuntu 18.04 (amd64) -- Raspbian Buster (armhf) - -If you are installing Rancher on a K3s cluster with **Raspbian Buster**, follow [these steps]({{}}/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. - -If you are installing Rancher on a K3s cluster with Alpine Linux, follow [these steps]({{}}/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. 
- -For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -All supported operating systems are 64-bit x86. - -The `ntp` (Network Time Protocol) package should be installed. This prevents errors with certificate validation that can occur when the time is not synchronized between the client and server. - -Some distributions of Linux may have default firewall rules that block communication with Helm. This [how-to guide]({{}}/rancher/v2.x/en/installation/options/firewall) shows how to check the default firewall rules for Oracle Linux and how to open the ports with `firewalld` if necessary. - -If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.x/en/installation/options/arm64-platform/) - -### Installing Docker - -Docker can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts]({{}}/rancher/v2.x/en/installation/requirements/installing-docker) to install Docker with one command. - -# Hardware Requirements - -This section describes the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. - -### CPU and Memory - -Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. The requirements are different depending on if you are installing Rancher in a single container with Docker, or if you are installing Rancher on a Kubernetes cluster. - -{{% tabs %}} -{{% tab "RKE Install Requirements" %}} - -These requirements apply to each host in an [RKE Kubernetes cluster where the Rancher server is installed.]({{}}/rancher/v2.x/en/installation/k8s-install/) - -Performance increased in Rancher v2.4.0. 
For the requirements of Rancher prior to v2.4.0, refer to [this section.](#cpu-and-memory-for-rancher-prior-to-v2-4-0) - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | ---------- | ------------ | -------| ------- | -| Small | Up to 150 | Up to 1500 | 2 | 8 GB | -| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | -| Large | Up to 500 | Up to 5000 | 8 | 32 GB | -| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | -| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | - -[Contact Rancher](https://rancher.com/contact/) for more than 2000 clusters and/or 20,000 nodes. -{{% /tab %}} - -{{% tab "K3s Install Requirements" %}} - -These requirements apply to each host in a [K3s Kubernetes cluster where the Rancher server is installed.]({{}}/rancher/v2.x/en/installation/k8s-install/) - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | Database Size | -| --------------- | ---------- | ------------ | -------| ---------| ------------------------- | -| Small | Up to 150 | Up to 1500 | 2 | 8 GB | 2 cores, 4 GB + 1000 IOPS | -| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | 2 cores, 4 GB + 1000 IOPS | -| Large | Up to 500 | Up to 5000 | 8 | 32 GB | 2 cores, 4 GB + 1000 IOPS | -| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | 2 cores, 4 GB + 1000 IOPS | -| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | 2 cores, 4 GB + 1000 IOPS | - -[Contact Rancher](https://rancher.com/contact/) for more than 2000 clusters and/or 20,000 nodes. - -{{% /tab %}} - -{{% tab "Docker Install Requirements" %}} - -These requirements apply to a host with a [single-node]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) installation of Rancher. 
- -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | -------- | --------- | ----- | ---- | -| Small | Up to 5 | Up to 50 | 1 | 4 GB | -| Medium | Up to 15 | Up to 200 | 2 | 8 GB | - -{{% /tab %}} -{{% /tabs %}} - -### CPU and Memory for Rancher prior to v2.4.0 - -{{% accordion label="Click to expand" %}} -These requirements apply to installing Rancher on an RKE Kubernetes cluster prior to Rancher v2.4.0: - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | --------- | ---------- | ----------------------------------------------- | ----------------------------------------------- | -| Small | Up to 5 | Up to 50 | 2 | 8 GB | -| Medium | Up to 15 | Up to 200 | 4 | 16 GB | -| Large | Up to 50 | Up to 500 | 8 | 32 GB | -| X-Large | Up to 100 | Up to 1000 | 32 | 128 GB | -| XX-Large | 100+ | 1000+ | [Contact Rancher](https://rancher.com/contact/) | [Contact Rancher](https://rancher.com/contact/) | -{{% /accordion %}} - -### Disks - -Rancher performance depends on the performance of etcd in the cluster. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. In larger clusters, consider using dedicated storage devices for etcd data and wal directories. - -# Networking Requirements - -This section describes the networking requirements for the node(s) where the Rancher server is installed. - -### Node IP Addresses - -Each node used should have a static IP configured, regardless of whether you are installing Rancher on a single node or on an HA cluster. In case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. - -### Port Requirements - -This section describes the port requirements for nodes running the `rancher/rancher` container. 
- -The port requirements are different depending on whether you are installing Rancher on a K3s cluster, on an RKE cluster, or in a single Docker container. - -{{% tabs %}} -{{% tab "K3s" %}} -### Ports for Communication with Downstream Clusters - -To communicate with downstream clusters, Rancher requires different ports to be open depending on the infrastructure you are using. - -For example, if you are deploying Rancher on nodes hosted by an infrastructure provider, port `22` must be open for SSH. - -The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.x/en/cluster-provisioning). - -
Port Requirements for the Rancher Management Plane
- -![Basic Port Requirements]({{}}/img/rancher/port-communications.svg) - -The following tables break down the port requirements for inbound and outbound traffic: - -
Inbound Rules for Rancher Nodes
- -| Protocol | Port | Source | Description | -| -------- | ---- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------- | -| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | -| TCP | 443 |
  • server nodes
  • agent nodes
  • hosted/imported Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl | - -
Outbound Rules for Rancher Nodes
- -| Protocol | Port | Destination | Description | -| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | `35.160.43.145/32`, `35.167.242.46/32`, `52.33.59.17/32` | git.rancher.io (catalogs) | -| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -**Note** Rancher nodes may also require additional outbound access for any external [authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/) which is configured (LDAP for example). - -### Additional Port Requirements for Nodes in a K3s Kubernetes Cluster - -You will need to open additional ports to launch the Kubernetes cluster that is required for a high-availability installation of Rancher. - -The K3s server needs port 6443 to be accessible by the nodes. - -The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then port 8472 is not needed by K3s. - -If you wish to utilize the metrics server, you will need to open port 10250 on each node. - -> **Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. - -
Inbound Rules for Rancher Server Nodes
- -| Protocol | Port | Source | Description -|-----|-----|----------------|---| -| TCP | 6443 | K3s server nodes | Kubernetes API -| UDP | 8472 | K3s server and agent nodes | Required only for Flannel VXLAN -| TCP | 10250 | K3s server and agent nodes | kubelet - -Typically all outbound traffic is allowed. -{{% /tab %}} -{{% tab "RKE" %}} -### Ports for Communication with Downstream Clusters - -To communicate with downstream clusters, Rancher requires different ports to be open depending on the infrastructure you are using. - -For example, if you are deploying Rancher on nodes hosted by an infrastructure provider, port `22` must be open for SSH. - -The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.x/en/cluster-provisioning). - -
Port Requirements for the Rancher Management Plane
- -![Basic Port Requirements]({{}}/img/rancher/port-communications.svg) - -The following tables break down the port requirements for inbound and outbound traffic: - -
Inbound Rules for Rancher Nodes
- -| Protocol | Port | Source | Description | -| -------- | ---- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------- | -| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | -| TCP | 443 |
  • etcd nodes
  • controlplane nodes
  • worker nodes
  • hosted/imported Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl | - -
Outbound Rules for Rancher Nodes
- -| Protocol | Port | Destination | Description | -| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | `35.160.43.145/32`, `35.167.242.46/32`, `52.33.59.17/32` | git.rancher.io (catalogs) | -| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -**Note** Rancher nodes may also require additional outbound access for any external [authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/) which is configured (LDAP for example). - -### Additional Port Requirements for Nodes in an RKE Kubernetes Cluster - -You will need to open additional ports to launch the Kubernetes cluster that is required for a high-availability installation of Rancher. - -If you follow the Rancher installation documentation for setting up a Kubernetes cluster using RKE, you will set up a cluster in which all three nodes have all three roles: etcd, controlplane, and worker. In that case, you can refer to this list of requirements for each node with all three roles. - -If you installed Rancher on a Kubernetes cluster that doesn't have all three roles on each node, refer to the [port requirements for the Rancher Kubernetes Engine (RKE).]({{}}/rke/latest/en/os/#ports) The RKE docs show a breakdown of the port requirements for each role. - -
Inbound Rules for Nodes with All Three Roles: etcd, Controlplane, and Worker
- -Protocol | Port | Source | Description ------------|------|----------|-------------- -TCP | 22 | Linux worker nodes only, and any network that you want to be able to remotely access this node from. | Remote access over SSH -TCP | 80 | Any source that consumes Ingress services | Ingress controller (HTTP) -TCP | 443 | Any source that consumes Ingress services | Ingress controller (HTTPS) -TCP | 2376 | Rancher nodes | Docker daemon TLS port used by Docker Machine (only needed when using Node Driver/Templates) -TCP | 2379 | etcd nodes and controlplane nodes | etcd client requests -TCP | 2380 | etcd nodes and controlplane nodes | etcd peer communication -TCP | 3389 | Windows worker nodes only, and any network that you want to be able to remotely access this node from. | Remote access over RDP -TCP | 6443 | etcd nodes, controlplane nodes, and worker nodes | Kubernetes apiserver -UDP | 8472 | etcd nodes, controlplane nodes, and worker nodes | Canal/Flannel VXLAN overlay networking -TCP | 9099 | the node itself (local traffic, not across nodes) | Canal/Flannel livenessProbe/readinessProbe -TCP | 10250 | controlplane nodes | kubelet -TCP | 10254 | the node itself (local traffic, not across nodes) | Ingress controller livenessProbe/readinessProbe -TCP/UDP | 30000-32767 | Any source that consumes NodePort services | NodePort port range - -
Outbound Rules for Nodes with All Three Roles: etcd, Controlplane, and Worker
- -Protocol | Port | Source | Destination | Description ------------|------|----------|---------------|-------------- -TCP | 22 | RKE node | Any node configured in Cluster Configuration File | SSH provisioning of node by RKE -TCP | 443 | Rancher nodes | Rancher agent | -TCP | 2379 | etcd nodes | etcd client requests | -TCP | 2380 | etcd nodes | etcd peer communication | -TCP | 6443 | RKE node | controlplane nodes | Kubernetes API server -TCP | 6443 | controlplane nodes | Kubernetes API server | -UDP | 8472 | etcd nodes, controlplane nodes, and worker nodes | Canal/Flannel VXLAN overlay networking | -TCP | 9099 | the node itself (local traffic, not across nodes) | Canal/Flannel livenessProbe/readinessProbe | -TCP | 10250 | etcd nodes, controlplane nodes, and worker nodes | kubelet | -TCP | 10254 | the node itself (local traffic, not across nodes) | Ingress controller livenessProbe/readinessProbe - -{{% /tab %}} -{{% tab "Docker" %}} -### Ports for Communication with Downstream Clusters - -For a Docker installation, you only need to open the ports required to enable Rancher to communicate with downstream user clusters. - -The port requirements depend on the infrastructure you are using. For example, if you are deploying Rancher on nodes hosted by an infrastructure provider, port `22` must be open for SSH. - -The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.x/en/cluster-provisioning). - -
Port Requirements for the Rancher Management Plane
- -![Basic Port Requirements]({{}}/img/rancher/port-communications.svg) - -The following tables break down the port requirements for Rancher nodes, for inbound and outbound traffic: - -**Note** Rancher nodes may also require additional outbound access for any external [authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/) which is configured (LDAP for example). - - -
Inbound Rules
- -| Protocol | Port | Source | Description | -| -------- | ---- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------- | -| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | -| TCP | 443 |
  • etcd nodes
  • controlplane nodes
  • worker nodes
  • hosted/imported Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl | - - -
Outbound Rules
- -| Protocol | Port | Source | Description | -| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | `35.160.43.145/32`, `35.167.242.46/32`, `52.33.59.17/32` | git.rancher.io (catalogs) | -| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -**Note** Rancher nodes may also require additional outbound access for any external [authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/) which is configured (LDAP for example). -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.5/en/ecm/requirements/ports/_index.md b/content/rancher/v2.5/en/ecm/requirements/ports/_index.md deleted file mode 100644 index c6cb826bb90..00000000000 --- a/content/rancher/v2.5/en/ecm/requirements/ports/_index.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Port Requirements -description: Read about port requirements needed in order for Rancher to operate properly, both for Rancher nodes and downstream Kubernetes cluster nodes -weight: 300 ---- - -To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. - -## Rancher Nodes - -The following table lists the ports that need to be open to and from nodes that are running the Rancher server. - -The port requirements differ based on whether Rancher is installed in a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. - -{{% tabs %}} -{{% tab "K3s" %}} - -The K3s server needs port 6443 to be accessible by the nodes. - -The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. The node should not listen on any other port. 
K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then port 8472 is not needed by K3s. - -If you wish to utilize the metrics server, you will need to open port 10250 on each node. - -> **Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. - -
Inbound Rules for Rancher Server Nodes
- -| Protocol | Port | Source | Description -|-----|-----|----------------|---| -| TCP | 6443 | K3s server nodes | Kubernetes API -| UDP | 8472 | K3s server and agent nodes | Required only for Flannel VXLAN. -| TCP | 10250 | K3s server and agent nodes | kubelet - -Typically all outbound traffic is allowed. - -{{% /tab %}} -{{% tab "RKE" %}} -
Inbound Rules for Rancher Nodes
- -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 80 | Load Balancer/Reverse Proxy | HTTP traffic to Rancher UI/API | -| TCP | 443 |
  • Load Balancer/Reverse Proxy
  • IPs of all cluster nodes and other API/UI clients
| HTTPS traffic to Rancher UI/API | - -
Outbound Rules for Rancher Nodes
- -| Protocol | Port | Destination | Description | -|-----|-----|----------------|---| -| TCP | 443 | `35.160.43.145`,`35.167.242.46`,`52.33.59.17` | Rancher catalog (git.rancher.io) | -| TCP | 22 | Any node created using a node driver | SSH provisioning of node by node driver | -| TCP | 2376 | Any node created using a node driver | Docker daemon TLS port used by node driver | -| TCP | Provider dependent | Port of the Kubernetes API endpoint in hosted cluster | Kubernetes API | - -{{% /tab %}} -{{% tab "Docker" %}} - -
Inbound Rules for Rancher Node
- -| Protocol | Port | Source | Description -|-----|-----|----------------|---| -| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used -| TCP | 443 |
  • hosted/imported Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl - -
Outbound Rules for Rancher Node
- -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | `35.160.43.145/32`,`35.167.242.46/32`,`52.33.59.17/32` | git.rancher.io (catalogs) | -| TCP | 2376 | Any node IP from a node created using a node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -{{% /tab %}} -{{% /tabs %}} - -> **Notes:** -> -> - Rancher nodes may also require additional outbound access for any external authentication provider which is configured (LDAP for example). -> - Kubernetes recommends TCP 30000-32767 for node port services. -> - For firewalls, traffic may need to be enabled within the cluster and pod CIDR. - -## Downstream Kubernetes Cluster Nodes - -Downstream Kubernetes clusters run your apps and services. This section describes what ports need to be opened on the nodes in downstream clusters so that Rancher can communicate with them. - -The port requirements differ depending on how the downstream cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster types]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-options). - ->**Tip:** -> ->If security isn't a large concern and you're okay with opening a few additional ports, you can use the table in [Commonly Used Ports](#commonly-used-ports) as your port reference instead of the comprehensive tables below. - -{{% tabs %}} - -{{% tab "Node Pools" %}} - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). 
- ->**Note:** ->The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. - -{{< ports-iaas-nodes >}} - -{{% /tab %}} - -{{% tab "Custom Nodes" %}} - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with [Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/). - -{{< ports-custom-nodes >}} - -{{% /tab %}} - -{{% tab "Hosted Clusters" %}} - -The following table depicts the port requirements for [hosted clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters). - -{{< ports-imported-hosted >}} - -{{% /tab %}} - -{{% tab "Imported Clusters" %}} - -The following table depicts the port requirements for [imported clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/). - -{{< ports-imported-hosted >}} - -{{% /tab %}} - -{{% /tabs %}} - - -## Other Port Considerations - -### Commonly Used Ports - -These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. - -| Protocol | Port | Description | -|:--------: |:----------------: |------------------------------------------------- | -| TCP | 22 | Node driver SSH provisioning | -| TCP | 2376 | Node driver Docker daemon TLS port | -| TCP | 2379 | etcd client requests | -| TCP | 2380 | etcd peer communication | -| UDP | 8472 | Canal/Flannel VXLAN overlay networking | -| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | -| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | -| TCP | 9796 | Default port required by Monitoring to scrape metrics | -| TCP | 6783 | Weave Port | -| UDP | 6783-6784 | Weave UDP Ports | -| TCP | 10250 | kubelet API | -| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | -| TCP/UDP | 30000-
32767 | NodePort port range | - ----- - -### Local Node Traffic - -Ports marked as `local traffic` (i.e., `9099 TCP`) in the above requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). -These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. - -However, this traffic may be blocked when: - -- You have applied strict host firewall policies on the node. -- You are using nodes that have multiple interfaces (multihomed). - -In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes / instances. - -### Rancher AWS EC2 security group - -When using the [AWS EC2 node driver]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. 
- -| Type | Protocol | Port Range | Source/Destination | Rule Type | -|-----------------|:--------:|:-----------:|------------------------|:---------:| -| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | -| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | -| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | -| All traffic | All | All | 0.0.0.0/0 | Outbound | diff --git a/content/rancher/v2.5/en/ecm/restoring-from-backup/_index.md b/content/rancher/v2.5/en/ecm/restoring-from-backup/_index.md deleted file mode 100644 index 7dd374e363b..00000000000 --- a/content/rancher/v2.5/en/ecm/restoring-from-backup/_index.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Restoring an RKE Kubernetes Cluster from Backup -weight: 8 ---- - -etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. 
- -Rancher recommends enabling the [ability to set up recurring snapshots of etcd]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots), but [one-time snapshots]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). - -As of Rancher v2.4.0, clusters can also be restored to a prior Kubernetes version and cluster configuration. - -This section covers the following topics: - -- [Viewing Available Snapshots](#viewing-available-snapshots) -- [Restoring a Cluster from a Snapshot](#restoring-a-cluster-from-a-snapshot) -- [Recovering etcd without a Snapshot](#recovering-etcd-without-a-snapshot) -- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) - -## Viewing Available Snapshots - -The list of all available snapshots for the cluster is available. - -1. In the **Global** view, navigate to the cluster that you want to view snapshots. - -2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. - -## Restoring a Cluster from a Snapshot - -If your Kubernetes cluster is broken, you can restore the cluster from a snapshot. - -Restorations changed in Rancher v2.4.0. - -{{% tabs %}} -{{% tab "Rancher v2.4.0+" %}} - -Snapshots are composed of the cluster data in etcd, the Kubernetes version, and the cluster configuration in the `cluster.yml.` These components allow you to select from the following options when restoring a cluster from a snapshot: - -- **Restore just the etcd contents:** This restoration is similar to restoring to snapshots in Rancher prior to v2.4.0. 
-- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. -- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. - -When rolling back to a prior Kubernetes version, the [upgrade strategy options]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/#configuring-the-upgrade-strategy) are ignored. Worker nodes are not cordoned or drained before being reverted to the older Kubernetes version, so that an unhealthy cluster can be more quickly restored to a healthy state. - -> **Prerequisite:** To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots) - -1. In the **Global** view, navigate to the cluster that you want to restore from a snapshots. - -2. Click the **⋮ > Restore Snapshot**. - -3. Select the snapshot that you want to use for restoring your cluster from the dropdown of available snapshots. - -4. In the **Restoration Type** field, choose one of the restoration options described above. - -5. Click **Save**. - -**Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. - -{{% /tab %}} -{{% tab "Rancher prior to v2.4.0" %}} - -> **Prerequisites:** -> -> - Make sure your etcd nodes are healthy. If you are restoring a cluster with unavailable etcd nodes, it's recommended that all etcd nodes are removed from Rancher before attempting to restore. 
For clusters in which Rancher used node pools to provision [nodes in an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/), new etcd nodes will automatically be created. For [custom clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/), please ensure that you add new etcd nodes to the cluster. -> - To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots) - -1. In the **Global** view, navigate to the cluster that you want to restore from a snapshot. - -2. Click the **⋮ > Restore Snapshot**. - -3. Select the snapshot that you want to use for restoring your cluster from the dropdown of available snapshots. - -4. Click **Save**. - -**Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. - -{{% /tab %}} -{{% /tabs %}} - -## Recovering etcd without a Snapshot - -If the group of etcd nodes loses quorum, the Kubernetes cluster will report a failure because no operations, e.g. deploying workloads, can be executed in the Kubernetes cluster. Please review the best practices for the what the [number of etcd nodes]({{}}/rancher/v2.x/en/cluster-provisioning/production/#count-of-etcd-nodes) should be in a Kubernetes cluster. If you want to recover your set of etcd nodes, follow these instructions: - -1. Keep only one etcd node in the cluster by removing all other etcd nodes. - -2. On the single remaining etcd node, run the following command: - - ``` - $ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock assaflavie/runlike etcd - ``` - - This command outputs the running command for etcd, save this command to use later. - -3. Stop the etcd container that you launched in the previous step and rename it to `etcd-old`. 
- - ``` - $ docker stop etcd - $ docker rename etcd etcd-old - ``` - -4. Take the saved command from Step 2 and revise it: - - - If you originally had more than 1 etcd node, then you need to change `--initial-cluster` to only contain the node that remains. - - Add `--force-new-cluster` to the end of the command. - -5. Run the revised command. - -6. After the single nodes is up and running, Rancher recommends adding additional etcd nodes to your cluster. If you have a [custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes) and you want to reuse an old node, you are required to [clean up the nodes]({{}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) before attempting to add them back into a cluster. - -# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 - -If you have any Rancher launched Kubernetes clusters that were created prior to v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots prior to v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/). diff --git a/content/rancher/v2.5/en/ecm/rke-templates/_index.md b/content/rancher/v2.5/en/ecm/rke-templates/_index.md deleted file mode 100644 index 5779b8eb458..00000000000 --- a/content/rancher/v2.5/en/ecm/rke-templates/_index.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: Managing RKE Cluster Templates -weight: 9 ---- - -RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. - -RKE is the [Rancher Kubernetes Engine,]({{}}/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. 
- -With Kubernetes increasing in popularity, there is a trend toward managing a larger number of smaller clusters. When you want to create many clusters, it’s more important to manage them consistently. Multi-cluster management comes with challenges to enforcing security and add-on configurations that need to be standardized before turning clusters over to end users. - -RKE templates help standardize these configurations. Regardless of whether clusters are created with the Rancher UI, the Rancher API, or an automated process, Rancher will guarantee that every cluster it provisions from an RKE template is uniform and consistent in the way it is produced. - -Admins control which cluster options can be changed by end users. RKE templates can also be shared with specific users and groups, so that admins can create different RKE templates for different sets of users. - -If a cluster was created with an RKE template, you can't change it to a different RKE template. You can only update the cluster to a new revision of the same template. - -As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. The new template can also be used to launch new clusters. 
- -The core features of RKE templates allow DevOps and security teams to: - -- Standardize cluster configuration and ensure that Rancher-provisioned clusters are created following best practices -- Prevent less technical users from making uninformed choices when provisioning clusters -- Share different templates with different sets of users and groups -- Delegate ownership of templates to users who are trusted to make changes to them -- Control which users can create templates -- Require users to create clusters from a template - -# Configurable Settings - -RKE templates can be created in the Rancher UI or defined in YAML format. They can define all the same parameters that can be specified when you use Rancher to provision custom nodes or nodes from an infrastructure provider: - -- Cloud provider options -- Pod security options -- Network providers -- Ingress controllers -- Network security configuration -- Network plugins -- Private registry URL and credentials -- Add-ons -- Kubernetes options, including configurations for Kubernetes components such as kube-api, kube-controller, kubelet, and services - -The [add-on section](#add-ons) of an RKE template is especially powerful because it allows a wide range of customization options. - -# Scope of RKE Templates - -RKE templates are supported for Rancher-provisioned clusters. The templates can be used to provision custom clusters or clusters that are launched by an infrastructure provider. - -RKE templates are for defining Kubernetes and Rancher settings. Node templates are responsible for configuring nodes. For tips on how to use RKE templates in conjunction with hardware, refer to [RKE Templates and Hardware]({{}}/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware). - -RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. 
- -As of v2.3.3, the settings of an existing cluster can be [saved as an RKE template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) This creates a new template and binds the cluster settings to the template, so that the cluster can only be upgraded if the [template is updated]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template), and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) The new template can also be used to create new clusters. - - -# Example Scenarios -When an organization has both basic and advanced Rancher users, administrators might want to give the advanced users more options for cluster creation, while restricting the options for basic users. - -These [example scenarios]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios) describe how an organization could use templates to standardize cluster creation. - -Some of the example scenarios include the following: - -- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. -- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#templates-for-basic-and-advanced-users) so that basic users can have more restricted options and advanced users can use more discretion when creating clusters. 
-- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#updating-templates-and-clusters-created-with-them) and clusters created from the template can [upgrade to the new version]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) of the template. -- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to share ownership of the template, this scenario describes how [template ownership can be shared.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#allowing-other-users-to-control-and-share-a-template) - -# Template Management - -When you create an RKE template, it is available in the Rancher UI from the **Global** view under **Tools > RKE Templates.** When you create a template, you become the template owner, which gives you permission to revise and share the template. You can share the RKE templates with specific users or groups, and you can also make it public. - -Administrators can turn on template enforcement to require users to always use RKE templates when creating a cluster. This allows administrators to guarantee that Rancher always provisions clusters with specific settings. - -RKE template updates are handled through a revision system. If you want to change or update a template, you create a new revision of the template. Then a cluster that was created with the older version of the template can be upgraded to the new template revision. - -In an RKE template, settings can be restricted to what the template owner chooses, or they can be open for the end user to select the value. 
The difference is indicated by the **Allow User Override** toggle over each setting in the Rancher UI when the template is created. - -For the settings that cannot be overridden, the end user will not be able to directly edit them. In order for a user to get different options of these settings, an RKE template owner would need to create a new revision of the RKE template, which would allow the user to upgrade and change that option. - -The documents in this section explain the details of RKE template management: - -- [Getting permission to create templates]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/) -- [Creating and revising templates]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/) -- [Enforcing template settings]({{}}/rancher/v2.x/en/admin-settings/rke-templates/enforcement/#requiring-new-clusters-to-use-a-cluster-template) -- [Overriding template settings]({{}}/rancher/v2.x/en/admin-settings/rke-templates/overrides/) -- [Sharing templates with cluster creators]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users) -- [Sharing ownership of a template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -An [example YAML configuration file for a template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-yaml) is provided for reference. 
- -# Applying Templates - -You can [create a cluster from a template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-a-cluster-template) that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) - -If the RKE template owner creates a new revision of the template, you can [upgrade your cluster to that revision.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) - -RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. - -As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. - -# Standardizing Hardware - -RKE templates are designed to standardize Kubernetes and Rancher settings. If you want to standardize your infrastructure as well, you use RKE templates [in conjunction with other tools]({{}}/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware). - -# YAML Customization - -If you define an RKE template as a YAML file, you can modify this [example RKE template YAML]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-yaml). The YAML in the RKE template uses the same customization that Rancher uses when creating an RKE cluster, but since the YAML is located within the context of a Rancher provisioned cluster, you will need to nest the RKE template customization under the `rancher_kubernetes_engine_config` directive in the YAML. - -The RKE documentation also has [annotated]({{}}/rke/latest/en/example-yamls/) `cluster.yml` files that you can use for reference. 
- -For guidance on available options, refer to the RKE documentation on [cluster configuration.]({{}}/rke/latest/en/config-options/) - -### Add-ons - -The add-on section of the RKE template configuration file works the same way as the [add-on section of a cluster configuration file]({{}}/rke/latest/en/config-options/add-ons/). - -The user-defined add-ons directive allows you to either call out and pull down Kubernetes manifests or put them inline directly. If you include these manifests as part of your RKE template, Rancher will provision those in the cluster. - -Some things you could do with add-ons include: - -- Install applications on the Kubernetes cluster after it starts -- Install plugins on nodes that are deployed with a Kubernetes daemonset -- Automatically set up namespaces, service accounts, or role binding - -The RKE template configuration must be nested within the `rancher_kubernetes_engine_config` directive. To set add-ons, when creating the template, you will click **Edit as YAML.** Then use the `addons` directive to add a manifest, or the `addons_include` directive to set which YAML files are used for the add-ons. For more information on custom add-ons, refer to the [user-defined add-ons documentation.]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) diff --git a/content/rancher/v2.5/en/ecm/rke-templates/applying-templates/_index.md b/content/rancher/v2.5/en/ecm/rke-templates/applying-templates/_index.md deleted file mode 100644 index 3dc23ad4a35..00000000000 --- a/content/rancher/v2.5/en/ecm/rke-templates/applying-templates/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Applying Templates -weight: 50 ---- - -You can create a cluster from an RKE template that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) - -RKE templates can be applied to new clusters. 
- -As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. - -You can't change a cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. - -This section covers the following topics: - -- [Creating a cluster from an RKE template](#creating-a-cluster-from-an-rke-template) -- [Updating a cluster created with an RKE template](#updating-a-cluster-created-with-an-rke-template) -- [Converting an existing cluster to use an RKE template](#converting-an-existing-cluster-to-use-an-rke-template) - -### Creating a Cluster from an RKE Template - -To add a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters) using an RKE template, use these steps: - -1. From the **Global** view, go to the **Clusters** tab. -1. Click **Add Cluster** and choose the infrastructure provider. -1. Provide the cluster name and node template details as usual. -1. To use an RKE template, under the **Cluster Options**, check the box for **Use an existing RKE template and revision.** -1. Choose an existing template and revision from the dropdown menu. -1. Optional: You can edit any settings that the RKE template owner marked as **Allow User Override** when the template was created. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. Then you will need to edit the cluster to upgrade it to the new revision. -1. Click **Save** to launch the cluster. - -### Updating a Cluster Created with an RKE Template - -When the template owner creates a template, each setting has a switch in the Rancher UI that indicates if users can override the setting. 
- -- If the setting allows a user override, you can update these settings in the cluster by [editing the cluster.]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) -- If the switch is turned off, you cannot change these settings unless the cluster owner creates a template revision that lets you override them. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. - -If a cluster was created from an RKE template, you can edit the cluster to update the cluster to a new revision of the template. - -As of Rancher v2.3.3, an existing cluster's settings can be [saved as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) In that situation, you can also edit the cluster to update the cluster to a new revision of the template. - -> **Note:** You can't change the cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. - -### Converting an Existing Cluster to Use an RKE Template - -This section describes how to create an RKE template from an existing cluster. - -RKE templates cannot be applied to existing clusters, except if you save an existing cluster's settings as an RKE template. This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) - -To convert an existing cluster to use an RKE template, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** -1. 
Enter a name for the template in the form that appears, and click **Create.** - -**Results:** - -- A new RKE template is created. -- The cluster is converted to use the new template. -- New clusters can be [created from the new template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/rke-templates/creating-and-revising/_index.md b/content/rancher/v2.5/en/ecm/rke-templates/creating-and-revising/_index.md deleted file mode 100644 index 10935277fa9..00000000000 --- a/content/rancher/v2.5/en/ecm/rke-templates/creating-and-revising/_index.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: Creating and Revising Templates -weight: 32 ---- - -This section describes how to manage RKE templates and revisions. You an create, share, update, and delete templates from the **Global** view under **Tools > RKE Templates.** - -Template updates are handled through a revision system. When template owners want to change or update a template, they create a new revision of the template. Individual revisions cannot be edited. However, if you want to prevent a revision from being used to create a new cluster, you can disable it. - -Template revisions can be used in two ways: to create a new cluster, or to upgrade a cluster that was created with an earlier version of the template. The template creator can choose a default revision, but when end users create a cluster, they can choose any template and any template revision that is available to them. After the cluster is created from a specific revision, it cannot change to another template, but the cluster can be upgraded to a newer available revision of the same template. - -The template owner has full control over template revisions, and can create new revisions to update the template, delete or disable revisions that should not be used to create clusters, and choose which template revision is the default. 
- -This section covers the following topics: - -- [Prerequisites](#prerequisites) -- [Creating a template](#creating-a-template) -- [Updating a template](#updating-a-template) -- [Deleting a template](#deleting-a-template) -- [Creating a revision based on the default revision](#creating-a-revision-based-on-the-default-revision) -- [Creating a revision based on a cloned revision](#creating-a-revision-based-on-a-cloned-revision) -- [Disabling a template revision](#disabling-a-template-revision) -- [Re-enabling a disabled template revision](#re-enabling-a-disabled-template-revision) -- [Setting a template revision as default](#setting-a-template-revision-as-default) -- [Deleting a template revision](#deleting-a-template-revision) -- [Upgrading a cluster to use a new template revision](#upgrading-a-cluster-to-use-a-new-template-revision) -- [Exporting a running cluster to a new RKE template and revision](#exporting-a-running-cluster-to-a-new-rke-template-and-revision) - -### Prerequisites - -You can create RKE templates if you have the **Create RKE Templates** permission, which can be [given by an administrator.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions) - -You can revise, share, and delete a template if you are an owner of the template. For details on how to become an owner of a template, refer to [the documentation on sharing template ownership.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -### Creating a Template - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Click **Add Template.** -1. Provide a name for the template. An auto-generated name is already provided for the template' first version, which is created along with this template. -1. 
Optional: Share the template with other users or groups by [adding them as members.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users) You can also make the template public to share with everyone in the Rancher setup. -1. Then follow the form on screen to save the cluster configuration parameters as part of the template's revision. The revision can be marked as default for this template. - -**Result:** An RKE template with one revision is configured. You can use this RKE template revision later when you [provision a Rancher-launched cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters). After a cluster is managed by an RKE template, it cannot be disconnected and the option to uncheck **Use an existing RKE Template and Revision** will be unavailable. - -### Updating a Template - -When you update an RKE template, you are creating a revision of the existing template. Clusters that were created with an older version of the template can be updated to match the new revision. - -You can't edit individual revisions. Since you can't edit individual revisions of a template, in order to prevent a revision from being used, you can [disable it.](#disabling-a-template-revision) - -When new template revisions are created, clusters using an older revision of the template are unaffected. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to edit and click the **⋮ > Edit.** -1. Edit the required information and click **Save.** -1. Optional: You can change the default revision of this template and also change who it is shared with. - -**Result:** The template is updated. 
To apply it to a cluster using an older version of the template, refer to the section on [upgrading a cluster to use a new revision of a template.](#upgrading-a-cluster-to-use-a-new-template-revision) - -### Deleting a Template - -When you no longer use an RKE template for any of your clusters, you can delete it. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to delete and click the **⋮ > Delete.** -1. Confirm the deletion when prompted. - -**Result:** The template is deleted. - -### Creating a Revision Based on the Default Revision - -You can clone the default template revision and quickly update its settings rather than creating a new revision from scratch. Cloning templates saves you the hassle of re-entering the access keys and other parameters needed for cluster creation. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to clone and click the **⋮ > New Revision From Default.** -1. Complete the rest of the form to create a new revision. - -**Result:** The RKE template revision is cloned and configured. - -### Creating a Revision Based on a Cloned Revision - -When creating new RKE template revisions from your user settings, you can clone an existing revision and quickly update its settings rather than creating a new one from scratch. Cloning template revisions saves you the hassle of re-entering the cluster parameters. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to clone. Then select **⋮ > Clone Revision.** -1. Complete the rest of the form. - -**Result:** The RKE template revision is cloned and configured. You can use the RKE template revision later when you provision a cluster. Any existing cluster using this RKE template can be upgraded to this new revision. 
- -### Disabling a Template Revision - -When you no longer want an RKE template revision to be used for creating new clusters, you can disable it. A disabled revision can be re-enabled. - -You can disable the revision if it is not being used by any cluster. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to disable. Then select **⋮ > Disable.** - -**Result:** The RKE template revision cannot be used to create a new cluster. - -### Re-enabling a Disabled Template Revision - -If you decide that a disabled RKE template revision should be used to create new clusters, you can re-enable it. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to re-enable. Then select **⋮ > Enable.** - -**Result:** The RKE template revision can be used to create a new cluster. - -### Setting a Template Revision as Default - -When end users create a cluster using an RKE template, they can choose which revision to create the cluster with. You can configure which revision is used by default. - -To set an RKE template revision as default, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template revision that should be default and click the **⋮ > Set as Default.** - -**Result:** The RKE template revision will be used as the default option when clusters are created with the template. - -### Deleting a Template Revision - -You can delete all revisions of a template except for the default revision. - -To permanently delete a revision, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template revision that should be deleted and click the **⋮ > Delete.** - -**Result:** The RKE template revision is deleted. 
- -### Upgrading a Cluster to Use a New Template Revision - -> This section assumes that you already have a cluster that [has an RKE template applied.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates) -> This section also assumes that you have [updated the template that the cluster is using](#updating-a-template) so that a new template revision is available. - -To upgrade a cluster to use a new template revision, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that you want to upgrade and click **⋮ > Edit.** -1. In the **Cluster Options** section, click the dropdown menu for the template revision, then select the new template revision. -1. Click **Save.** - -**Result:** The cluster is upgraded to use the settings defined in the new template revision. - -### Exporting a Running Cluster to a New RKE Template and Revision - -You can save an existing cluster's settings as an RKE template. - -This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.] - -To convert an existing cluster to use an RKE template, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** -1. Enter a name for the template in the form that appears, and click **Create.** - -**Results:** - -- A new RKE template is created. -- The cluster is converted to use the new template. 
-- New clusters can be [created from the new template and revision.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/rke-templates/creator-permissions/_index.md b/content/rancher/v2.5/en/ecm/rke-templates/creator-permissions/_index.md deleted file mode 100644 index 0773da504e3..00000000000 --- a/content/rancher/v2.5/en/ecm/rke-templates/creator-permissions/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Template Creator Permissions -weight: 10 ---- - -Administrators have the permission to create RKE templates, and only administrators can give that permission to other users. - -For more information on administrator permissions, refer to the [documentation on global permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/). - -# Giving Users Permission to Create Templates - -Templates can only be created by users who have the global permission **Create RKE Templates.** - -Administrators have the global permission to create templates, and only administrators can give that permission to other users. - -For information on allowing users to modify existing templates, refer to [Sharing Templates.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) - -Administrators can give users permission to create RKE templates in two ways: - -- By editing the permissions of an [individual user](#allowing-a-user-to-create-templates) -- By changing the [default permissions of new users](#allowing-new-users-to-create-templates-by-default) - -### Allowing a User to Create Templates - -An administrator can individually grant the role **Create RKE Templates** to any existing user by following these steps: - -1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** -1. 
In the **Global Permissions** section, choose **Custom** and select the **Create RKE Templates** role along with any other roles the user should have. Click **Save.** - -**Result:** The user has permission to create RKE templates. - -### Allowing New Users to Create Templates by Default - -Alternatively, the administrator can give all new users the default permission to create RKE templates by following the following steps. This will not affect the permissions of existing users. - -1. From the **Global** view, click **Security > Roles.** -1. Under the **Global** roles tab, go to the role **Create RKE Templates** and click the **⋮ > Edit**. -1. Select the option **Yes: Default role for new users** and click **Save.** - -**Result:** Any new user created in this Rancher installation will be able to create RKE templates. Existing users will not get this permission. - -### Revoking Permission to Create Templates - -Administrators can remove a user's permission to create templates with the following steps: - -1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** -1. In the **Global Permissions** section, un-check the box for **Create RKE Templates**. In this section, you can change the user back to a standard user, or give the user a different set of custom permissions. -1. Click **Save.** - -**Result:** The user cannot create RKE templates. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/rke-templates/enforcement/_index.md b/content/rancher/v2.5/en/ecm/rke-templates/enforcement/_index.md deleted file mode 100644 index a1fa1e79ddb..00000000000 --- a/content/rancher/v2.5/en/ecm/rke-templates/enforcement/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Template Enforcement -weight: 32 ---- - -This section describes how template administrators can enforce templates in Rancher, restricting the ability of users to create clusters without a template. 
- -By default, any standard user in Rancher can create clusters. But when RKE template enforcement is turned on, - -- Only an administrator has the ability to create clusters without a template. -- All standard users must use an RKE template to create a new cluster. -- Standard users cannot create a cluster without using a template. - -Users can only create new templates if the administrator [gives them permission.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/#allowing-a-user-to-create-templates) - -After a cluster is created with an RKE template, the cluster creator cannot edit settings that are defined in the template. The only way to change those settings after the cluster is created is to [upgrade the cluster to a new revision]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) of the same template. If cluster creators want to change template-defined settings, they would need to contact the template owner to get a new revision of the template. For details on how template revisions work, refer to the [documentation on revising templates.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) - -# Requiring New Clusters to Use an RKE Template - -You might want to require new clusters to use a template to ensure that any cluster launched by a [standard user]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) will use the Kubernetes and/or Rancher settings that are vetted by administrators. - -To require new clusters to use an RKE template, administrators can turn on RKE template enforcement with the following steps: - -1. From the **Global** view, click the **Settings** tab. -1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** -1. 
Set the value to **True** and click **Save.** - -**Result:** All clusters provisioned by Rancher must use a template, unless the creator is an administrator. - -# Disabling RKE Template Enforcement - -To allow new clusters to be created without an RKE template, administrators can turn off RKE template enforcement with the following steps: - -1. From the **Global** view, click the **Settings** tab. -1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** -1. Set the value to **False** and click **Save.** - -**Result:** When clusters are provisioned by Rancher, they don't need to use a template. diff --git a/content/rancher/v2.5/en/ecm/rke-templates/example-scenarios/_index.md b/content/rancher/v2.5/en/ecm/rke-templates/example-scenarios/_index.md deleted file mode 100644 index 4e93e102c62..00000000000 --- a/content/rancher/v2.5/en/ecm/rke-templates/example-scenarios/_index.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Example Scenarios -weight: 5 ---- - -These example scenarios describe how an organization could use templates to standardize cluster creation. - -- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. -- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](#templates-for-basic-and-advanced-users) so that basic users have more restricted options and advanced users have more discretion when creating clusters. -- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. 
If the best practices change, [a template can be updated to a new revision](#updating-templates-and-clusters-created-with-them) and clusters created from the template can upgrade to the new version of the template. -- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to delegate ownership of the template, this scenario describes how [template ownership can be shared.](#allowing-other-users-to-control-and-share-a-template) - - -# Enforcing a Template Setting for Everyone - -Let's say there is an organization in which the administrators decide that all new clusters should be created with Kubernetes version 1.14. - -1. First, an administrator creates a template which specifies the Kubernetes version as 1.14 and marks all other settings as **Allow User Override**. -1. The administrator makes the template public. -1. The administrator turns on template enforcement. - -**Results:** - -- All Rancher users in the organization have access to the template. -- All new clusters created by [standard users]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) with this template will use Kubernetes 1.14 and they are unable to use a different Kubernetes version. By default, standard users don't have permission to create templates, so this template will be the only template they can use unless more templates are shared with them. -- All standard users must use a cluster template to create a new cluster. They cannot create a cluster without using a template. - -In this way, the administrators enforce the Kubernetes version across the organization, while still allowing end users to configure everything else. - -# Templates for Basic and Advanced Users - -Let's say an organization has both basic and advanced users. Administrators want the basic users to be required to use a template, while the advanced users and administrators create their clusters however they want. - -1. 
First, an administrator turns on [RKE template enforcement.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/enforcement/#requiring-new-clusters-to-use-a-cluster-template) This means that every [standard user]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) in Rancher will need to use an RKE template when they create a cluster. -1. The administrator then creates two templates: - - - One template for basic users, with almost every option specified except for access keys - - One template for advanced users, which has most or all options with **Allow User Override** turned on - -1. The administrator shares the advanced template with only the advanced users. -1. The administrator makes the template for basic users public, so the more restrictive template is an option for everyone who creates a Rancher-provisioned cluster. - -**Result:** All Rancher users, except for administrators, are required to use a template when creating a cluster. Everyone has access to the restrictive template, but only advanced users have permission to use the more permissive template. The basic users are more restricted, while advanced users have more freedom when configuring their Kubernetes clusters. - -# Updating Templates and Clusters Created with Them - -Let's say an organization has a template that requires clusters to use Kubernetes v1.14. However, as time goes on, the administrators change their minds. They decide they want users to be able to upgrade their clusters to use newer versions of Kubernetes. - -In this organization, many clusters were created with a template that requires Kubernetes v1.14. Because the template does not allow that setting to be overridden, the users who created the cluster cannot directly edit that setting. 
- -The template owner has several options for allowing the cluster creators to upgrade Kubernetes on their clusters: - -- **Specify Kubernetes v1.15 on the template:** The template owner can create a new template revision that specifies Kubernetes v1.15. Then the owner of each cluster that uses that template can upgrade their cluster to a new revision of the template. This template upgrade allows the cluster creator to upgrade Kubernetes to v1.15 on their cluster. -- **Allow any Kubernetes version on the template:** When creating a template revision, the template owner can also mark the Kubernetes version as **Allow User Override** using the switch near that setting on the Rancher UI. This will allow clusters that upgrade to this template revision to use any version of Kubernetes. -- **Allow the latest minor Kubernetes version on the template:** The template owner can also create a template revision in which the Kubernetes version is defined as **Latest v1.14 (Allows patch version upgrades).** This means clusters that use that revision will be able to get patch version upgrades, but major version upgrades will not be allowed. - -# Allowing Other Users to Control and Share a Template - -Let's say Alice is a Rancher administrator. She owns an RKE template that reflects her organization's agreed-upon best practices for creating a cluster. - -Bob is an advanced user who can make informed decisions about cluster configuration. Alice trusts Bob to create new revisions of her template as the best practices get updated over time. Therefore, she decides to make Bob an owner of the template. - -To share ownership of the template with Bob, Alice [adds Bob as an owner of her template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -The result is that as a template owner, Bob is in charge of version control for that template. 
Bob can now do all of the following: - -- [Revise the template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) when the best practices change -- [Disable outdated revisions]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#disabling-a-template-revision) of the template so that no new clusters can be created with it -- [Delete the whole template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#deleting-a-template) if the organization wants to go in a different direction -- [Set a certain revision as default]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#setting-a-template-revision-as-default) when users create a cluster with it. End users of the template will still be able to choose which revision they want to create the cluster with. -- [Share the template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) with specific users, make the template available to all Rancher users, or share ownership of the template with another user. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/rke-templates/example-yaml/_index.md b/content/rancher/v2.5/en/ecm/rke-templates/example-yaml/_index.md deleted file mode 100644 index 3c85e86d616..00000000000 --- a/content/rancher/v2.5/en/ecm/rke-templates/example-yaml/_index.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Example YAML -weight: 60 ---- - -Below is an example RKE template configuration file for reference. - -The YAML in the RKE template uses the same customization that is used when you create an RKE cluster. However, since the YAML is within the context of a Rancher provisioned RKE cluster, the customization from the RKE docs needs to be nested under the `rancher_kubernetes_engine` directive. - -```yaml -# -# Cluster Config -# -docker_root_dir: /var/lib/docker - -enable_cluster_alerting: false -# This setting is not enforced. 
Clusters -# created with this sample template -# would have alerting turned off by default, -# but end users could still turn alerting -# on or off. - -enable_cluster_monitoring: true -# This setting is not enforced. Clusters -# created with this sample template -# would have monitoring turned on -# by default, but end users could still -# turn monitoring on or off. - -enable_network_policy: false -local_cluster_auth_endpoint: - enabled: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: # Your RKE template config goes here. - addon_job_timeout: 30 - authentication: - strategy: x509 - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. -# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.15.3-rancher3-1 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 0 - retention: 72h - snapshot: false - uid: 0 - kube_api: - always_pull_images: false - pod_security_policy: false - 
service_node_port_range: 30000-32767 - ssh_agent_auth: false -windows_prefered_cluster: false -``` \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/rke-templates/overrides/_index.md b/content/rancher/v2.5/en/ecm/rke-templates/overrides/_index.md deleted file mode 100644 index bb5f00d4b9e..00000000000 --- a/content/rancher/v2.5/en/ecm/rke-templates/overrides/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Overriding Template Settings -weight: 33 ---- - -When a user creates an RKE template, each setting in the template has a switch in the Rancher UI that indicates if users can override the setting. This switch marks those settings as **Allow User Override.** - -After a cluster is created with a template, end users can't update any of the settings defined in the template unless the template owner marked them as **Allow User Override.** However, if the template is [updated to a new revision]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising) that changes the settings or allows end users to change them, the cluster can be upgraded to a new revision of the template and the changes in the new revision will be applied to the cluster. - -When any parameter is set as **Allow User Override** on the RKE template, it means that end users have to fill out those fields during cluster creation and they can edit those settings afterward at any time. 
- -The **Allow User Override** model of the RKE template is useful for situations such as: - -- Administrators know that some settings will need the flexibility to be frequently updated over time -- End users will need to enter their own access keys or secret keys, for example, cloud credentials or credentials for backup snapshots \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/rke-templates/rke-templates-and-hardware/_index.md b/content/rancher/v2.5/en/ecm/rke-templates/rke-templates-and-hardware/_index.md deleted file mode 100644 index 67ca181a964..00000000000 --- a/content/rancher/v2.5/en/ecm/rke-templates/rke-templates-and-hardware/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: RKE Templates and Infrastructure -weight: 90 ---- - -In Rancher, RKE templates are used to provision Kubernetes and define Rancher settings, while node templates are used to provision nodes. - -Therefore, even if RKE template enforcement is turned on, the end user still has flexibility when picking the underlying hardware when creating a Rancher cluster. The end users of an RKE template can still choose an infrastructure provider and the nodes they want to use. - -If you want to standardize the hardware in your clusters, use RKE templates in conjunction with node templates or with a server provisioning tool such as Terraform. - -### Node Templates - -[Node templates]({{}}/rancher/v2.x/en/user-settings/node-templates) are responsible for node configuration and node provisioning in Rancher. From your user profile, you can set up node templates to define which templates are used in each of your node pools. With node pools enabled, you can make sure you have the required number of nodes in each node pool, and ensure that all nodes in the pool are the same. - -### Terraform - -Terraform is a server provisioning tool. It uses infrastructure-as-code that lets you create almost every aspect of your infrastructure with Terraform configuration files. 
It can automate the process of server provisioning in a way that is self-documenting and easy to track in version control. - -This section focuses on how to use Terraform with the [Rancher 2 Terraform provider](https://www.terraform.io/docs/providers/rancher2/), which is a recommended option to standardize the hardware for your Kubernetes clusters. If you use the Rancher Terraform provider to provision hardware, and then use an RKE template to provision a Kubernetes cluster on that hardware, you can quickly create a comprehensive, production-ready cluster. - -Terraform allows you to: - -- Define almost any kind of infrastructure-as-code, including servers, databases, load balancers, monitoring, firewall settings, and SSL certificates -- Leverage catalog apps and multi-cluster apps -- Codify infrastructure across many platforms, including Rancher and major cloud providers -- Commit infrastructure-as-code to version control -- Easily repeat configuration and setup of infrastructure -- Incorporate infrastructure changes into standard development practices -- Prevent configuration drift, in which some servers become configured differently than others - -# How Does Terraform Work? - -Terraform is written in files with the extension `.tf`. It is written in HashiCorp Configuration Language, which is a declarative language that lets you define the infrastructure you want in your cluster, the cloud provider you are using, and your credentials for the provider. Then Terraform makes API calls to the provider in order to efficiently create that infrastructure. - -To create a Rancher-provisioned cluster with Terraform, go to your Terraform configuration file and define the provider as Rancher 2. You can set up your Rancher 2 provider with a Rancher API key. Note: The API key has the same permissions and access level as the user it is associated with. - -Then Terraform calls the Rancher API to provision your infrastructure, and Rancher calls the infrastructure provider. 
As an example, if you wanted to use Rancher to provision infrastructure on AWS, you would provide both your Rancher API key and your AWS credentials in the Terraform configuration file or in environment variables so that they could be used to provision the infrastructure. - -When you need to make changes to your infrastructure, instead of manually updating the servers, you can make changes in the Terraform configuration files. Then those files can be committed to version control, validated, and reviewed as necessary. Then when you run `terraform apply`, the changes would be deployed. - -# Tips for Working with Terraform - -- There are examples of how to provide most aspects of a cluster in the [documentation for the Rancher 2 provider.](https://www.terraform.io/docs/providers/rancher2/) - -- In the Terraform settings, you can install Docker Machine by using the Docker Machine node driver. - -- You can also modify auth in the Terraform provider. - -- You can reverse engineer how to define a setting in Terraform by changing the setting in Rancher, then going back and checking your Terraform state file to see how it maps to the current state of your infrastructure. - -- If you want to manage Kubernetes cluster settings, Rancher settings, and hardware settings all in one place, use [Terraform modules](https://github.com/rancher/terraform-modules). You can pass a cluster configuration YAML file or an RKE template configuration file to a Terraform module so that the Terraform module will create it. In that case, you could use your infrastructure-as-code to manage the version control and revision history of both your Kubernetes cluster and its underlying hardware. - -# Tip for Creating CIS Benchmark Compliant Clusters - -This section describes one way that you can make security and compliance-related config files standard in your clusters. 
- -When you create a [CIS benchmark compliant cluster,]({{}}/rancher/v2.x/en/security/) you have an encryption config file and an audit log config file. - -Your infrastructure provisioning system can write those files to disk. Then in your RKE template, you would specify where those files will be, then add your encryption config file and audit log config file as extra mounts to the `kube-api-server`. - -Then you would make sure that the `kube-api-server` flag in your RKE template uses your CIS-compliant config files. - -In this way, you can create flags that comply with the CIS benchmark. - -# Resources - -- [Terraform documentation](https://www.terraform.io/docs/) -- [Rancher2 Terraform provider documentation](https://www.terraform.io/docs/providers/rancher2/) -- [The RanchCast - Episode 1: Rancher 2 Terraform Provider](https://youtu.be/YNCq-prI8-8): In this demo, Director of Community Jason van Brackel walks through using the Rancher 2 Terraform Provider to provision nodes and create a custom cluster. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/rke-templates/template-access-and-sharing/_index.md b/content/rancher/v2.5/en/ecm/rke-templates/template-access-and-sharing/_index.md deleted file mode 100644 index 863faa1bc8b..00000000000 --- a/content/rancher/v2.5/en/ecm/rke-templates/template-access-and-sharing/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Access and Sharing -weight: 31 ---- - -If you are an RKE template owner, you can share it with users or groups of users, who can then use the template to create clusters. - -Since RKE templates are specifically shared with users and groups, owners can share different RKE templates with different sets of users. - -When you share a template, each user can have one of two access levels: - -- **Owner:** This user can update, delete, and share the templates that they own. The owner can also share the template with other users. 
-- **User:** These users can create clusters using the template. They can also upgrade those clusters to new revisions of the same template. When you share a template as **Make Public (read-only),** all users in your Rancher setup have the User access level for the template. - -If you create a template, you automatically become an owner of that template. - -If you want to delegate responsibility for updating the template, you can share ownership of the template. For details on how owners can modify templates, refer to the [documentation about revising templates.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising) - -There are several ways to share templates: - -- Add users to a new RKE template during template creation -- Add users to an existing RKE template -- Make the RKE template public, sharing it with all users in the Rancher setup -- Share template ownership with users who are trusted to modify the template - -### Sharing Templates with Specific Users or Groups - -To allow users or groups to create clusters using your template, you can give them the basic **User** access level for the template. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to share and click the **⋮ > Edit.** -1. In the **Share Template** section, click on **Add Member**. -1. Search in the **Name** field for the user or group you want to share the template with. -1. Choose the **User** access type. -1. Click **Save.** - -**Result:** The user or group can create clusters using the template. - -### Sharing Templates with All Users - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to share and click the **⋮ > Edit.** -1. Under **Share Template,** click **Make Public (read-only).** Then click **Save.** - -**Result:** All users in the Rancher setup can create clusters using the template. 
- -### Sharing Ownership of Templates - -If you are the creator of a template, you might want to delegate responsibility for maintaining and updating a template to another user or group. - -In that case, you can give users the Owner access type, which allows another user to update your template, delete it, or share access to it with other users. - -To give Owner access to a user or group, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to share and click the **⋮ > Edit.** -1. Under **Share Template**, click on **Add Member** and search in the **Name** field for the user or group you want to share the template with. -1. In the **Access Type** field, click **Owner.** -1. Click **Save.** - -**Result:** The user or group has the Owner access type, and can modify, share, or delete the template. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/_index.md deleted file mode 100644 index 80456c25494..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/_index.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Setting up Kubernetes Clusters in Rancher -description: Provisioning Kubernetes Clusters -weight: 2 ---- - -Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. - -This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.x/en/overview/concepts) page. - -For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture]({{}}/rancher/v2.x/en/overview/architecture/) page. 
- -This section covers the following topics: - - - -- [Setting up clusters in a hosted Kubernetes provider](#setting-up-clusters-in-a-hosted-kubernetes-provider) -- [Launching Kubernetes with Rancher](#launching-kubernetes-with-rancher) - - [Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider](#launching-kubernetes-and-provisioning-nodes-in-an-infrastructure-provider) - - [Launching Kubernetes on Existing Custom Nodes](#launching-kubernetes-on-existing-custom-nodes) -- [Importing Existing Clusters](#importing-existing-clusters) - - [Importing and Editing K3s Clusters](#importing-and-editing-k3s-clusters) - - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table" %}} - -# Setting up Clusters in a Hosted Kubernetes Provider - -In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. - -For more information, refer to the section on [hosted Kubernetes clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters) - -# Launching Kubernetes with Rancher - -Rancher uses the [Rancher Kubernetes Engine (RKE)]({{}}/rke/latest/en/) as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. - -In RKE clusters, Rancher manages the deployment of Kubernetes. These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. 
- -These nodes can be dynamically provisioned through Rancher's UI, which calls [Docker Machine](https://docs.docker.com/machine/) to launch nodes on various cloud providers. - -If you already have a node that you want to add to an RKE cluster, you can add it to the cluster by running a Rancher agent container on it. - -For more information, refer to the section on [RKE clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) - -### Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider - -Rancher can dynamically provision nodes in infrastructure providers such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This template defines the parameters used to launch nodes in your cloud providers. - -One benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically replace it, thus maintaining the expected cluster configuration. - -The cloud providers available for creating a node template are decided based on the [node drivers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers) active in the Rancher UI. - -For more information, refer to the section on [nodes hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) - -### Launching Kubernetes on Existing Custom Nodes - -When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) which creates a custom cluster. - -You can bring any nodes you want to Rancher and use them to create a cluster. - -These nodes include on-premise bare metal servers, cloud-hosted virtual machines, or on-premise virtual machines. 
- -# Importing Existing Clusters - -In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. - -Note that Rancher does not automate the provisioning, scaling, or upgrade of imported clusters. Other Rancher features, including management of cluster, role-based access control, policy, and workloads, are available for imported clusters. - -For all imported Kubernetes clusters except for K3s clusters, the configuration of an imported cluster still has to be edited outside of Rancher. Some examples of editing the cluster include adding and removing nodes, upgrading the Kubernetes version, and changing Kubernetes component parameters. - -In Rancher v2.4, it became possible to import a K3s cluster and upgrade Kubernetes by editing the cluster in the Rancher UI. - -For more information, refer to the section on [importing existing clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) - -### Importing and Editing K3s Clusters - -[K3s]({{}}/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. K3s Kubernetes clusters can now be imported into Rancher. - -When a K3s cluster is imported, Rancher will recognize it as K3s, and the Rancher UI will expose the following features in addition to the functionality for other imported clusters: - -- The ability to upgrade the K3s version -- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster. 
- -For more information, refer to the section on [imported K3s clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/#additional-features-of-imported-k3s-clusters) diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/cluster-capabilities-table/index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/cluster-capabilities-table/index.md deleted file mode 100644 index b3560c76223..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/cluster-capabilities-table/index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -headless: true ---- -| Action | [Rancher launched Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) | [Hosted Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) | [Imported Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters) | -| --- | --- | ---| ---| -| [Using kubectl and a kubeconfig file to Access a Cluster]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) | ✓ | ✓ | ✓ | -| [Managing Cluster Members]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/) | ✓ | ✓ | ✓ | -| [Editing and Upgrading Clusters]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) | ✓ | ✓ | * | -| [Managing Nodes]({{}}/rancher/v2.x/en/cluster-admin/nodes) | ✓ | ✓ | ✓ | -| [Managing Persistent Volumes and Storage Classes]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) | ✓ | ✓ | ✓ | -| [Managing Projects, Namespaces and Workloads]({{}}/rancher/v2.x/en/cluster-admin/projects-and-namespaces/) | ✓ | ✓ | ✓ | -| [Using App Catalogs]({{}}/rancher/v2.x/en/catalog/) | ✓ | ✓ | ✓ | -| [Configuring Tools (Alerts, Notifiers, Logging, Monitoring, Istio)]({{}}/rancher/v2.x/en/cluster-admin/tools/) | ✓ | ✓ | ✓ | -| [Cloning Clusters]({{}}/rancher/v2.x/en/cluster-admin/cloning-clusters/)| ✓ | ✓ | | -| [Ability to rotate certificates]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/) | ✓ | | | -| [Ability to back up your Kubernetes 
Clusters]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/) | ✓ | | | -| [Ability to recover and restore etcd]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/) | ✓ | | | -| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher]({{}}/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/) | ✓ | | | -| [Configuring Pod Security Policies]({{}}/rancher/v2.x/en/cluster-admin/pod-security-policy/) | ✓ | | | -| [Running Security Scans]({{}}/rancher/v2.x/en/security/security-scan/) | ✓ | | | - -/* Cluster configuration options can't be edited for imported clusters, except for [K3s clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/#additional-features-for-imported-k3s-clusters) diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/_index.md deleted file mode 100644 index dffe1dfa5c6..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Setting up Clusters from Hosted Kubernetes Providers -weight: 2 ---- - -In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. - -In this use case, Rancher sends a request to a hosted provider using the provider's API. The provider then provisions and hosts the cluster for you. When the cluster finishes building, you can manage it from the Rancher UI along with clusters you've provisioned that are hosted on-premise or in an infrastructure provider. 
- -Rancher supports the following Kubernetes providers: - -Kubernetes Providers | Available as of | - --- | --- | -[Google GKE (Google Kubernetes Engine)](https://cloud.google.com/kubernetes-engine/) | v2.0.0 | -[Amazon EKS (Amazon Elastic Container Service for Kubernetes)](https://aws.amazon.com/eks/) | v2.0.0 | -[Microsoft AKS (Azure Kubernetes Service)](https://azure.microsoft.com/en-us/services/kubernetes-service/) | v2.0.0 | -[Alibaba ACK (Alibaba Cloud Container Service for Kubernetes)](https://www.alibabacloud.com/product/kubernetes) | v2.2.0 | -[Tencent TKE (Tencent Kubernetes Engine)](https://intl.cloud.tencent.com/product/tke) | v2.2.0 | -[Huawei CCE (Huawei Cloud Container Engine)](https://www.huaweicloud.com/en-us/product/cce.html) | v2.2.0 | - -## Hosted Kubernetes Provider Authentication - -When using Rancher to create a cluster hosted by a provider, you are prompted for authentication information. This information is required to access the provider's API. For more information on how to obtain this information, see the following procedures: - -- [Creating a GKE Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke) -- [Creating an EKS Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks) -- [Creating an AKS Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks) -- [Creating an ACK Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack) -- [Creating a TKE Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke) -- [Creating a CCE Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce) diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/ack/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/ack/_index.md deleted file mode 100644 index 23da1eadf53..00000000000 --- 
a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/ack/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Creating an Aliyun ACK Cluster -shortTitle: Alibaba Cloud Container Service for Kubernetes -weight: 2120 ---- - -You can use Rancher to create a cluster hosted in Alibaba Cloud Kubernetes (ACK). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. - -## Prerequisites - ->**Note** ->Deploying to ACK will incur charges. - -1. In Aliyun, activate the following services in their respective consoles. - - - [Container Service](https://cs.console.aliyun.com) - - [Resource Orchestration Service](https://ros.console.aliyun.com) - - [RAM](https://ram.console.aliyun.com) - -2. Make sure that the account you will be using to create the ACK cluster has the appropriate permissions. Refer to the official Alibaba Cloud documentation about [Role authorization](https://www.alibabacloud.com/help/doc-detail/86483.htm) and [Use the Container Service console as a RAM user](https://www.alibabacloud.com/help/doc-detail/86484.htm) for details. - -3. In Alibaba Cloud, create an [access key](https://www.alibabacloud.com/help/doc-detail/53045.html). - -4. In Alibaba Cloud, create an [SSH key pair](https://www.alibabacloud.com/help/doc-detail/51793.html). This key is used to access nodes in the Kubernetes cluster. - -## Create an ACK Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Alibaba ACK**. - -1. Enter a **Cluster Name**. - -1. {{< step_create-cluster_member-roles >}} - -1. Configure **Account Access** for the ACK cluster. 
Choose the geographical region in which to build your cluster, and input the access key that was created as part of the prerequisite steps. - -1. Click **Next: Configure Cluster**, then choose cluster type, the version of Kubernetes and the availability zone. - -1. If you choose **Kubernetes** as the cluster type, Click **Next: Configure Master Nodes**, then complete the **Master Nodes** form. - -1. Click **Next: Configure Worker Nodes**, then complete the **Worker Nodes** form. - -1. Review your options to confirm they're correct. Then click **Create**. - -{{< result_create-cluster >}} diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/aks/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/aks/_index.md deleted file mode 100644 index 9a942184721..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/aks/_index.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: Creating an AKS Cluster -shortTitle: Azure Kubernetes Service -weight: 2115 ---- - -You can use Rancher to create a cluster hosted in Microsoft Azure Kubernetes Service (AKS). - -## Prerequisites in Microsoft Azure - ->**Note** ->Deploying to AKS will incur charges. - -To interact with Azure APIs, an AKS cluster requires an Azure Active Directory (AD) service principal. The service principal is needed to dynamically create and manage other Azure resources, and it provides credentials for your cluster to communicate with AKS. For more information about the service principal, refer to the [AKS documentation](https://docs.microsoft.com/en-us/azure/aks/kubernetes-service-principal). 
- -Before creating the service principal, you need to obtain the following information from the [Microsoft Azure Portal](https://portal.azure.com): - -- Your subscription ID -- Your tenant ID -- An app ID (also called a client ID) -- Client secret -- A resource group - -The below sections describe how to set up these prerequisites using either the Azure command line tool or the Azure portal. - -### Setting Up the Service Principal with the Azure Command Line Tool - -You can create the service principal by running this command: - -``` -az ad sp create-for-rbac --skip-assignment -``` - -The result should show information about the new service principal: -``` -{ - "appId": "xxxx--xxx", - "displayName": "", - "name": "http://", - "password": "", - "tenant": "" -} -``` - -You also need to add roles to the service principal so that it has privileges for communication with the AKS API. It also needs access to create and list virtual networks. - -Below is an example command for assigning the Contributor role to a service principal. Contributors can manage anything on AKS but cannot give access to others: - -``` -az role assignment create \ - --assignee $appId \ - --scope /subscriptions/$/resourceGroups/$ \ - --role Contributor -``` - -You can also create the service principal and give it Contributor privileges by combining the two commands into one. In this command, the scope needs to provide a full path to an Azure resource: - -``` -az ad sp create-for-rbac \ - --scope /subscriptions/$/resourceGroups/$ \ - --role Contributor -``` - -### Setting Up the Service Principal from the Azure Portal - -You can also follow these instructions to set up a service principal and give it role-based access from the Azure Portal. - -1. Go to the Microsoft Azure Portal [home page](https://portal.azure.com). - -1. Click **Azure Active Directory.** - -1. Click **App registrations.** - -1. Click **New registration.** - -1. Enter a name. This will be the name of your service principal. - -1. 
Optional: Choose which accounts can use the service principal. - -1. Click **Register.** - -1. You should now see the name of your service principal under **Azure Active Directory > App registrations.** - -1. Click the name of your service principal. Take note of the tenant ID and application ID (also called app ID or client ID) so that you can use it when provisioning your AKS cluster. Then click **Certificates & secrets.** - -1. Click **New client secret.** - -1. Enter a short description, pick an expiration time, and click **Add.** Take note of the client secret so that you can use it when provisioning the AKS cluster. - -**Result:** You have created a service principal and you should be able to see it listed in the **Azure Active Directory** section under **App registrations.** You still need to give the service principal access to AKS. - -To give role-based access to your service principal, - -1. Click **All Services** in the left navigation bar. Then click **Subscriptions.** - -1. Click the name of the subscription that you want to associate with your Kubernetes cluster. Take note of the subscription ID so that you can use it when provisioning your AKS cluster. - -1. Click **Access Control (IAM).** - -1. In the **Add role assignment** section, click **Add.** - -1. In the **Role** field, select a role that will have access to AKS. For example, you can use the **Contributor** role, which has permission to manage everything except for giving access to other users. - -1. In the **Assign access to** field, select **Azure AD user, group, or service principal.** - -1. In the **Select** field, select the name of your service principal and click **Save.** - -**Result:** Your service principal now has access to AKS. - - -## Create the AKS Cluster - -Use Rancher to set up and configure your Kubernetes cluster. - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Azure Kubernetes Service**. - -1. Enter a **Cluster Name**. - -1. 
{{< step_create-cluster_member-roles >}} - -1. Use your subscription ID, tenant ID, app ID, and client secret to give your cluster access to AKS. If you don't have all of that information, you can retrieve it using these instructions: - - **App ID and tenant ID:** To get the app ID and tenant ID, you can go to the Azure Portal, then click **Azure Active Directory**, then click **App registrations,** then click the name of the service principal. The app ID and tenant ID are both on the app registration detail page. - - **Client secret:** If you didn't copy the client secret when creating the service principal, you can get a new one if you go to the app registration detail page, then click **Certificates & secrets**, then click **New client secret.** - - **Subscription ID:** The subscription ID is available in the portal from **All services > Subscriptions.** - -1. {{< step_create-cluster_cluster-options >}} - -1. Complete the **Account Access** form using the output from your Service Principal. This information is used to authenticate with Azure. - -1. Use **Nodes** to provision each node in your cluster and choose a geographical region. - - [Microsoft Documentation: How to create and use an SSH public and private key pair](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys) -
-1. Click **Create**. -
-1. Review your options to confirm they're correct. Then click **Create**. - -{{< result_create-cluster >}} diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/cce/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/cce/_index.md deleted file mode 100644 index 452a69d543b..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/cce/_index.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Creating a Huawei CCE Cluster -shortTitle: Huawei Cloud Kubernetes Service -weight: 2130 ---- - -You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for CCE, but by default, this cluster driver is `inactive`. In order to launch CCE clusters, you will need to [enable the CCE cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning CCE clusters. - -## Prerequisites in Huawei - ->**Note** ->Deploying to CCE will incur charges. - -1. Find your project ID in Huawei CCE portal. See the CCE documentation on how to [manage your projects](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0066738518.html). - -2. Create an [Access Key ID and Secret Access Key](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0079477318.html). - -## Limitations - -Huawei CCE service doesn't support the ability to create clusters with public access through their API. You are required to run Rancher in the same VPC as the CCE clusters that you want to provision. - -## Create the CCE Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Huawei CCE**. - -3. Enter a **Cluster Name**. - -4. {{< step_create-cluster_member-roles >}} - -5. 
Enter **Project Id**, Access Key ID as **Access Key** and Secret Access Key as **Secret Key**. Then click **Next: Configure cluster**. - -6. Fill the following cluster configuration: - - |Settings|Description| - |---|---| - | Cluster Type | Which type of node you want to include into the cluster, `VirtualMachine` or `BareMetal`. | - | Description | The description of the cluster. | - | Master Version | The Kubernetes version. | - | Management Scale Count | The max node count of the cluster. The options are 50, 200 and 1000. The larger the scale count, the higher the cost. | - | High Availability | Enable master node high availability. The cluster with high availability enabled will have more cost. | - | Container Network Mode | The network mode used in the cluster. `overlay_l2` and `vpc-router` are supported in `VirtualMachine` type and `underlay_ipvlan` is supported in `BareMetal` type | - | Container Network CIDR | Network CIDR for the cluster. | - | VPC Name | The VPC name which the cluster is going to deploy into. Rancher will create one if it is blank. | - | Subnet Name | The Subnet name which the cluster is going to deploy into. Rancher will create one if it is blank. | - | External Server | This option is reserved for the future when we can enable CCE cluster public access via API. For now, it is always disabled. | - | Cluster Label | The labels for the cluster. | - | Highway Subnet | This option is only supported in `BareMetal` type. It requires you to select a VPC with high network speed for the bare metal machines. | - - **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -7. 
Fill the following node configuration of the cluster: - - |Settings|Description| - |---|---| - | Zone | The available zone at where the node(s) of the cluster is deployed. | - | Billing Mode | The bill mode for the cluster node(s). In `VirtualMachine` type, only `Pay-per-use` is supported. in `BareMetal`, you can choose `Pay-per-use` or `Yearly/Monthly`. | - | Validity Period | This option only shows in `Yearly/Monthly` bill mode. It means how long you want to pay for the cluster node(s). | - | Auto Renew | This option only shows in `Yearly/Monthly` bill mode. It means that the cluster node(s) will renew the `Yearly/Monthly` payment automatically or not. | - | Data Volume Type | Data volume type for the cluster node(s). `SATA`, `SSD` or `SAS` for this option. | - | Data Volume Size | Data volume size for the cluster node(s) | - | Root Volume Type | Root volume type for the cluster node(s). `SATA`, `SSD` or `SAS` for this option. | - | Root Volume Size | Root volume size for the cluster node(s) | - | Node Flavor | The node flavor of the cluster node(s). The flavor list in Rancher UI is fetched from Huawei Cloud. It includes all the supported node flavors. | - | Node Count | The node count of the cluster | - | Node Operating System | The operating system for the cluster node(s). Only `EulerOS 2.2` and `CentOS 7.4` are supported right now. | - | SSH Key Name | The ssh key for the cluster node(s) | - | EIP | The public IP options for the cluster node(s). `Disabled` means that the cluster node(s) are not going to bind a public IP. `Create EIP` means that the cluster node(s) will bind one or many newly created Eips after provisioned and more options will be shown in the UI to set the to-create EIP parameters. And `Select Existed EIP` means that the node(s) will bind to the EIPs you select. | - | EIP Count | This option will only be shown when `Create EIP` is selected. It means how many EIPs you want to create for the node(s). 
| - | EIP Type | This option will only be shown when `Create EIP` is selected. The options are `5_bgp` and `5_sbgp`. | - | EIP Share Type | This option will only be shown when `Create EIP` is selected. The only option is `PER`. | - | EIP Charge Mode | This option will only be shown when `Create EIP` is selected. The options are pay by `BandWidth` and pay by `Traffic`. | - | EIP Bandwidth Size | This option will only be shown when `Create EIP` is selected. The BandWidth of the EIPs. | - | Authentication Mode | It means enabling `RBAC` or also enabling `Authenticating Proxy`. If you select `Authenticating Proxy`, the certificate which is used for authenticating proxy will be also required. | - | Node Label | The labels for the cluster node(s). | - -8. Click **Create** to create the CCE cluster. - -{{< result_create-cluster >}} diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/eks/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/eks/_index.md deleted file mode 100644 index 1e87eee4c83..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/eks/_index.md +++ /dev/null @@ -1,289 +0,0 @@ ---- -title: Creating an EKS Cluster -shortTitle: Amazon EKS -weight: 2110 ---- - -Amazon EKS provides a managed control plane for your Kubernetes cluster. Amazon EKS runs the Kubernetes control plane instances across multiple Availability Zones to ensure high availability. Rancher provides an intuitive user interface for managing and deploying the Kubernetes clusters you run in Amazon EKS. With this guide, you will use Rancher to quickly and easily launch an Amazon EKS Kubernetes cluster in your AWS account. For more information on Amazon EKS, see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html). - - -## Prerequisites in Amazon Web Services - ->**Note** ->Deploying to Amazon AWS will incur charges. 
For more information, refer to the [EKS pricing page](https://aws.amazon.com/eks/pricing/). - -To set up a cluster on EKS, you will need to set up an Amazon VPC (Virtual Private Cloud). You will also need to make sure that the account you will be using to create the EKS cluster has the appropriate permissions. For details, refer to the official guide on [Amazon EKS Prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#eks-prereqs). - -### Amazon VPC - -You need to set up an Amazon VPC to launch the EKS cluster. The VPC enables you to launch AWS resources into a virtual network that you've defined. For more information, refer to the [Tutorial: Creating a VPC with Public and Private Subnets for Your Amazon EKS Cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-public-private-vpc.html). - -### IAM Policies - -Rancher needs access to your AWS account in order to provision and administer your Kubernetes clusters in Amazon EKS. You'll need to create a user for Rancher in your AWS account and define what that user can access. - -1. Create a user with programmatic access by following the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). - -2. Next, create an IAM policy that defines what this user has access to in your AWS account. The required permissions are [here.]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/#appendix-minimum-eks-permissions) Follow the steps [here](https://docs.aws.amazon.com/eks/latest/userguide/EKS_IAM_user_policies.html) to create an IAM policy and attach it to your user. - -3. Finally, follow the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) to create an access key and secret key for this user. - -> **Note:** It's important to regularly rotate your access and secret keys. 
See this [documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#rotating_access_keys_console) for more information. - -For more detailed information on IAM policies for EKS, refer to the official [documentation on Amazon EKS IAM Policies, Roles, and Permissions](https://docs.aws.amazon.com/eks/latest/userguide/IAM_policies.html). - -## Architecture - -The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by EKS. - -![Rancher architecture with EKS hosted cluster]({{}}/img/rancher/rancher-architecture.svg) - -## Create the EKS Cluster - -Use Rancher to set up and configure your Kubernetes cluster. - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Amazon EKS**. - -1. Enter a **Cluster Name**. - -1. {{< step_create-cluster_member-roles >}} - -1. Configure **Account Access** for the EKS cluster. Complete each drop-down and field using the information obtained in [2. Create Access Key and Secret Key](#prerequisites-in-amazon-web-services). - - | Setting | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Region | From the drop-down choose the geographical region in which to build your cluster. | - | Access Key | Enter the access key that you created in [2. Create Access Key and Secret Key](#2-create-access-key-and-secret-key). | - | Secret Key | Enter the secret key that you created in [2. Create Access Key and Secret Key](#2-create-access-key-and-secret-key). | - -1. Click **Next: Select Service Role**. Then choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). 
- - Service Role | Description - -------------|--------------------------- - Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. - Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you've already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). - -1. Click **Next: Select VPC and Subnet**. - -1. Choose an option for **Public IP for Worker Nodes**. Your selection for this option determines what options are available for **VPC & Subnet**. - - Option | Description - -------|------------ - Yes | When your cluster nodes are provisioned, they're assigned both a private and a public IP address. - No: Private IPs only | When your cluster nodes are provisioned, they're assigned only a private IP address.

If you choose this option, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. - -1. Now choose a **VPC & Subnet**. For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. - - - [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) - - [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - - {{% accordion id="yes" label="Public IP for Worker Nodes—Yes" %}} -If you choose to assign a public IP address to your cluster's worker nodes, you have the option of choosing between a VPC that's automatically generated by Rancher (i.e., **Standard: Rancher generated VPC and Subnet**), or a VPC that you've already created with AWS (i.e., **Custom: Choose from your existing VPC and Subnets**). Choose the option that best fits your use case. - -1. Choose a **VPC and Subnet** option. - - Option | Description - -------|------------ - Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC and Subnet. - Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html). If you choose this option, complete the remaining steps below. - -1. If you're using **Custom: Choose from your existing VPC and Subnets**: - - (If you're using **Standard**, skip to [step 11](#select-instance-options)) - - 1. Make sure **Custom: Choose from your existing VPC and Subnets** is selected. - - 1. From the drop-down that displays, choose a VPC. - - 1. Click **Next: Select Subnets**. 
Then choose one of the **Subnets** that displays. - - 1. Click **Next: Select Security Group**. - {{% /accordion %}} - {{% accordion id="no" label="Public IP for Worker Nodes—No: Private IPs only" %}} -If you chose this option, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. Follow the steps below. - ->**Tip:** When using only private IP addresses, you can provide your nodes internet access by creating a VPC constructed with two subnets, a private set and a public set. The private set should have its route tables configured to point toward a NAT in the public set. For more information on routing traffic from private subnets, please see the [official AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html). - - 1. From the drop-down that displays, choose a VPC. - - 1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. - - 1. Click **Next: Select Security Group**. - {{% /accordion %}} - -1. Choose a **Security Group**. See the documentation below on how to create one. - - Amazon Documentation: - - [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) - - [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) - - [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) - -1. Click **Select Instance Options**, and then edit the node options available. Instance type and size of your worker nodes affects how many IP addresses each worker node will have available. See this [documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more information. 
- - Option | Description - -------|------------ - Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. - Custom AMI Override | If you want to use a custom [Amazon Machine Image](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html#creating-an-ami) (AMI), specify it here. By default, Rancher will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the EKS version that you chose. - Desired ASG Size | The number of instances that your cluster will provision. - User Data | Custom commands can be passed to perform automated configuration tasks. **WARNING: Modifying this may cause your nodes to be unable to join the cluster.** _Note: Available as of v2.2.0_ - -1. Click **Create**. - -{{< result_create-cluster >}} - -## Troubleshooting - -For any issues or troubleshooting details for your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/troubleshooting.html). - -## AWS Service Events - -To find information on any AWS Service events, please see [this page](https://status.aws.amazon.com/). - -## Security and Compliance - -For more information on security and compliance with your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/shared-responsibilty.html). - -## Tutorial - -This [tutorial](https://aws.amazon.com/blogs/opensource/managing-eks-clusters-rancher/) on the AWS Open Source Blog will walk you through how to set up an EKS cluster with Rancher, deploy a publicly accessible app to test the cluster, and deploy a sample project to track real-time geospatial data using a combination of other open-source software such as Grafana and InfluxDB. - -## Appendix - Minimum EKS Permissions - -Documented here is a minimum set of permissions necessary to use all functionality of the EKS driver in Rancher. 
- -Resource targeting uses `*` as the ARN of many of the resources created cannot be known prior to creating the EKS cluster in Rancher. Some permissions (for example `ec2:CreateVpc`) are only used in situations where Rancher handles the creation of certain resources. - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "EC2Permisssions", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSubnet", - "ec2:CreateKeyPair", - "ec2:AttachInternetGateway", - "ec2:ReplaceRoute", - "ec2:DeleteRouteTable", - "ec2:AssociateRouteTable", - "ec2:DescribeInternetGateways", - "ec2:CreateRoute", - "ec2:CreateInternetGateway", - "ec2:RevokeSecurityGroupEgress", - "ec2:DescribeAccountAttributes", - "ec2:DeleteInternetGateway", - "ec2:DescribeKeyPairs", - "ec2:CreateTags", - "ec2:CreateRouteTable", - "ec2:DescribeRouteTables", - "ec2:DetachInternetGateway", - "ec2:DisassociateRouteTable", - "ec2:RevokeSecurityGroupIngress", - "ec2:DeleteVpc", - "ec2:CreateSubnet", - "ec2:DescribeSubnets", - "ec2:DeleteKeyPair", - "ec2:DeleteTags", - "ec2:CreateVpc", - "ec2:DescribeAvailabilityZones", - "ec2:CreateSecurityGroup", - "ec2:ModifyVpcAttribute", - "ec2:AuthorizeSecurityGroupEgress", - "ec2:DescribeTags", - "ec2:DeleteRoute", - "ec2:DescribeSecurityGroups", - "ec2:DescribeImages", - "ec2:DescribeVpcs", - "ec2:DeleteSecurityGroup" - ], - "Resource": "*" - }, - { - "Sid": "EKSPermissions", - "Effect": "Allow", - "Action": [ - "eks:DeleteFargateProfile", - "eks:DescribeFargateProfile", - "eks:ListTagsForResource", - "eks:UpdateClusterConfig", - "eks:DescribeNodegroup", - "eks:ListNodegroups", - "eks:DeleteCluster", - "eks:CreateFargateProfile", - "eks:DeleteNodegroup", - "eks:UpdateNodegroupConfig", - "eks:DescribeCluster", - "eks:ListClusters", - "eks:UpdateClusterVersion", - "eks:UpdateNodegroupVersion", - "eks:ListUpdates", - "eks:CreateCluster", - "eks:UntagResource", - "eks:CreateNodegroup", - "eks:ListFargateProfiles", - 
"eks:DescribeUpdate", - "eks:TagResource" - ], - "Resource": "*" - }, - { - "Sid": "IAMPermissions", - "Effect": "Allow", - "Action": [ - "iam:ListRoleTags", - "iam:RemoveRoleFromInstanceProfile", - "iam:CreateRole", - "iam:AttachRolePolicy", - "iam:AddRoleToInstanceProfile", - "iam:DetachRolePolicy", - "iam:GetRole", - "iam:DeleteRole", - "iam:CreateInstanceProfile", - "iam:ListInstanceProfilesForRole", - "iam:PassRole", - "iam:GetInstanceProfile", - "iam:ListRoles", - "iam:ListInstanceProfiles", - "iam:DeleteInstanceProfile" - ], - "Resource": "*" - }, - { - "Sid": "CloudFormationPermisssions", - "Effect": "Allow", - "Action": [ - "cloudformation:DescribeStackResource", - "cloudformation:ListStackResources", - "cloudformation:DescribeStackResources", - "cloudformation:DescribeStacks", - "cloudformation:ListStacks", - "cloudformation:CreateStack", - "cloudformation:DeleteStack" - ], - "Resource": "*" - }, - { - "Sid": "AutoScalingPermissions", - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:UpdateAutoScalingGroup", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "autoscaling:CreateOrUpdateTags", - "autoscaling:DeleteAutoScalingGroup", - "autoscaling:CreateAutoScalingGroup", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:CreateLaunchConfiguration", - "autoscaling:DeleteLaunchConfiguration" - ], - "Resource": "*" - } - ] -} -``` diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/gke/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/gke/_index.md deleted file mode 100644 index 25a1a88f2bd..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/gke/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Creating a GKE Cluster -shortTitle: Google Kubernetes Engine -weight: 2105 ---- - -## Prerequisites in Google Kubernetes 
Engine - ->**Note** ->Deploying to GKE will incur charges. - -Create a service account using [Google Kubernetes Engine](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts). GKE uses this account to operate your cluster. Creating this account also generates a private key used for authentication. - -The service account requires the following roles: - -- **Compute Viewer:** `roles/compute.viewer` -- **Project Viewer:** `roles/viewer` -- **Kubernetes Engine Admin:** `roles/container.admin` -- **Service Account User:** `roles/iam.serviceAccountUser` - -[Google Documentation: Creating and Enabling Service Accounts](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) - -## Create the GKE Cluster - -Use {{< product >}} to set up and configure your Kubernetes cluster. - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Google Kubernetes Engine**. - -3. Enter a **Cluster Name**. - -4. {{< step_create-cluster_member-roles >}} - -5. Either paste your service account private key in the **Service Account** text box or **Read from a file**. Then click **Next: Configure Nodes**. - - >**Note:** After submitting your private key, you may have to enable the Google Kubernetes Engine API. If prompted, browse to the URL displayed in the Rancher UI to enable the API. - -6. Select your **Cluster Options**, customize your **Nodes** and customize the **Security** for the GKE cluster. Review your options to confirm they're correct. Then click **Create**. 
- -{{< result_create-cluster >}} diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/tke/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/tke/_index.md deleted file mode 100644 index f0046b905c2..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/hosted-kubernetes-clusters/tke/_index.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Creating a Tencent TKE Cluster -shortTitle: Tencent Kubernetes Engine -weight: 2125 ---- - -You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. - -## Prerequisites in Tencent - ->**Note** ->Deploying to TKE will incur charges. - -1. Make sure that the account you will be using to create the TKE cluster has the appropriate permissions by referring to the [Cloud Access Management](https://intl.cloud.tencent.com/document/product/598/10600) documentation for details. - -2. Create a [Cloud API Secret ID and Secret Key](https://console.cloud.tencent.com/capi). - -3. Create a [Private Network and Subnet](https://intl.cloud.tencent.com/document/product/215/4927) in the region that you want to deploy your Kubernetes cluster. - -4. Create a [SSH key pair](https://intl.cloud.tencent.com/document/product/213/6092). This key is used to access the nodes in the Kubernetes cluster. - -## Create a TKE Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Tencent TKE**. - -3. Enter a **Cluster Name**. - -4. {{< step_create-cluster_member-roles >}} - -5. 
Configure **Account Access** for the TKE cluster. Complete each drop-down and field using the information obtained in [Prerequisites](#prerequisites). - - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Region | From the drop-down, choose the geographical region in which to build your cluster. | - | Secret ID | Enter the Secret ID that you obtained from the Tencent Cloud Console. | - | Secret Key | Enter the Secret Key that you obtained from the Tencent Cloud Console. | - -6. Click `Next: Configure Cluster` to set your TKE cluster configurations. - - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Kubernetes Version | TKE currently supports only Kubernetes version 1.10.5. | - | Node Count | Enter the number of worker nodes you want to purchase for your Kubernetes cluster, up to 100. | - | VPC | Select the VPC name that you have created in the Tencent Cloud Console. | - | Container Network CIDR | Enter the CIDR range of your Kubernetes cluster. You may check the available range of the CIDR in the VPC service of the Tencent Cloud Console. Defaults to 172.16.0.0/16. | - - **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -7. Click `Next: Select Instance Type` to choose the instance type that will be used for your TKE cluster. 
- - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Availability Zone | Choose the availability zone of the VPC region. | - | Subnet | Select the Subnet that you have created within the VPC, and add a new one if you don't have it in the chosen availability zone. | - | Instance Type | From the drop-down, choose the VM instance type that you want to use for the TKE cluster. The default is S2.MEDIUM4 (CPU 2 Memory 4 GiB). | - -8. Click `Next: Configure Instance` to configure the VM instance that will be used for your TKE cluster. - - Option | Description - -------|------------ - Operating System | The name of the operating system, currently supports Centos7.2x86_64 or ubuntu16.04.1 LTSx86_64 - Security Group | Security group ID; by default, no security groups are bound. - Root Disk Type | System disk type. System disk type restrictions are detailed in the [CVM instance configuration](https://cloud.tencent.com/document/product/213/11518). - Root Disk Size | System disk size. Linux system adjustment range is 20 - 50G, step size is 1. - Data Disk Type | Data disk type, defaults to the SSD cloud drive - Data Disk Size | Data disk size (GB), the step size is 10 - Band Width Type | Type of bandwidth, PayByTraffic or PayByHour - Band Width | Public network bandwidth (Mbps) - Key Pair | Key ID; after it is associated, the key can be used to log in to the VM node - -9. Click **Create**. 
- -{{< result_create-cluster >}} diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/_index.md deleted file mode 100644 index 70b3cef45e8..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/_index.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Importing Existing Clusters into Rancher -description: Learn how you can create a cluster in Rancher by importing an existing Kubernetes cluster. Then, you can manage it using Rancher -metaTitle: 'Kubernetes Cluster Management' -metaDescription: 'Learn how you can import an existing Kubernetes cluster and then manage it using Rancher' -weight: 5 ---- - -When managing an imported cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. - -Rancher features, including management of cluster, role-based access control, policy, and workloads, are available for imported clusters. Note that Rancher does not automate the provisioning or scaling of imported clusters. - -For all imported Kubernetes clusters except for K3s clusters, the configuration of an imported cluster still has to be edited outside of Rancher. Some examples of editing the cluster include adding and removing nodes, upgrading the Kubernetes version, and changing Kubernetes component parameters. - -Rancher v2.4 added the capability to import a K3s cluster into Rancher, as well as the ability to upgrade Kubernetes by editing the cluster in the Rancher UI. 
- -- [Features](#features) -- [Prerequisites](#prerequisites) -- [Importing a cluster](#importing-a-cluster) -- [Imported K3s clusters](#imported-k3s-clusters) - - [Additional features for imported K3s clusters](#additional-features-for-imported-k3s-clusters) - - [Configuring a K3s Cluster to Enable Importation to Rancher](#configuring-a-k3s-cluster-to-enable-importation-to-rancher) - - [Debug Logging and Troubleshooting for Imported K3s clusters](#debug-logging-and-troubleshooting-for-imported-k3s-clusters) -- [Annotating imported clusters](#annotating-imported-clusters) - -# Features - -After importing a cluster, the cluster owner can: - -- [Manage cluster access]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) through role-based access control -- Enable [monitoring]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and [logging]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) -- Enable [Istio]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/) -- Use [pipelines]({{}}/rancher/v2.x/en/project-admin/pipelines/) -- Configure [alerts]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) and [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) -- Manage [projects]({{}}/rancher/v2.x/en/project-admin/) and [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/) - -After importing a K3s cluster, the cluster owner can also [upgrade Kubernetes from the Rancher UI.]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/) - -# Prerequisites - -If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to import the cluster into Rancher. - -In order to apply the privilege, you need to run: - -```plain -kubectl create clusterrolebinding cluster-admin-binding \ - --clusterrole cluster-admin \ - --user [USER_ACCOUNT] -``` - -before running the `kubectl` command to import the cluster. 
 - -By default, GKE users are not given this privilege, so you will need to run the command before importing GKE clusters. To learn more about role-based access control for GKE, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). - -> If you are importing a K3s cluster, make sure the `cluster.yml` is readable. It is protected by default. For details, refer to [Configuring a K3s cluster to enable importation to Rancher.](#configuring-a-k3s-cluster-to-enable-importation-to-rancher) - -# Importing a Cluster - -1. From the **Clusters** page, click **Add Cluster**. -2. Choose **Import**. -3. Enter a **Cluster Name**. -4. {{< step_create-cluster_member-roles >}} -5. Click **Create**. -6. The prerequisite for `cluster-admin` privileges is shown (see **Prerequisites** above), including an example command to fulfill the prerequisite. -7. Copy the `kubectl` command to your clipboard and run it on a node where kubeconfig is configured to point to the cluster you want to import. If you are unsure it is configured correctly, run `kubectl get nodes` to verify before running the command shown in {{< product >}}. -8. If you are using self-signed certificates, you will receive the message `certificate signed by unknown authority`. To work around this validation, copy the command starting with `curl` displayed in {{< product >}} to your clipboard. Then run the command on a node where kubeconfig is configured to point to the cluster you want to import. -9. When you finish running the command(s) on your node, click **Done**. - {{< result_import-cluster >}} - -> **Note:** -> You cannot re-import a cluster that is currently active in a Rancher setup. - -# Imported K3s Clusters - -You can now import a K3s Kubernetes cluster into Rancher. [K3s]({{}}/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. You can also upgrade Kubernetes by editing the K3s cluster in the Rancher UI. 
 - -### Additional Features for Imported K3s Clusters - -When a K3s cluster is imported, Rancher will recognize it as K3s, and the Rancher UI will expose the following features in addition to the functionality for other imported clusters: - -- The ability to upgrade the K3s version -- The ability to configure the maximum number of nodes that will be upgraded concurrently -- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster. - -### Configuring K3s Cluster Upgrades - -> It is a Kubernetes best practice to back up the cluster before upgrading. When upgrading a high-availability K3s cluster with an external database, back up the database in whichever way is recommended by the relational database provider. - -The **concurrency** is the maximum number of nodes that are permitted to be unavailable during an upgrade. If the number of unavailable nodes is larger than the **concurrency,** the upgrade will fail. If an upgrade fails, you may need to repair or remove failed nodes before the upgrade can succeed. - -- **Controlplane concurrency:** The maximum number of server nodes to upgrade at a single time; also the maximum unavailable server nodes -- **Worker concurrency:** The maximum number of worker nodes to upgrade at the same time; also the maximum unavailable worker nodes - -In the K3s documentation, controlplane nodes are called server nodes. These nodes run the Kubernetes master, which maintains the desired state of the cluster. In K3s, these controlplane nodes have the capability to have workloads scheduled to them by default. - -Also in the K3s documentation, nodes with the worker role are called agent nodes. Any workloads or pods that are deployed in the cluster can be scheduled to these nodes by default. - -### Configuring a K3s Cluster to Enable Importation to Rancher - -The K3s server needs to be configured to allow writing to the kubeconfig file. 
- -This can be accomplished by passing `--write-kubeconfig-mode 644` as a flag during installation: - -``` -$ curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 -``` - -The option can also be specified using the environment variable `K3S_KUBECONFIG_MODE`: - -``` -$ curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" sh -s - -``` - -### Debug Logging and Troubleshooting for Imported K3s Clusters - -Nodes are upgraded by the system upgrade controller running in the downstream cluster. Based on the cluster configuration, Rancher deploys two [plans](https://github.com/rancher/system-upgrade-controller#example-upgrade-plan) to upgrade K3s nodes: one for controlplane nodes and one for workers. The system upgrade controller follows the plans and upgrades the nodes. - -To enable debug logging on the system upgrade controller deployment, edit the [configmap](https://github.com/rancher/system-upgrade-controller/blob/50a4c8975543d75f1d76a8290001d87dc298bdb4/manifests/system-upgrade-controller.yaml#L32) to set the debug environment variable to true. Then restart the `system-upgrade-controller` pod. - -Logs created by the `system-upgrade-controller` can be viewed by running this command: - -``` -kubectl logs -n cattle-system system-upgrade-controller -``` - -The current status of the plans can be viewed with this command: - -``` -kubectl get plans -A -o yaml -``` - -If the cluster becomes stuck in upgrading, restart the `system-upgrade-controller`. - -To prevent issues when upgrading, the [Kubernetes upgrade best practices](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) should be followed. - -### Annotating Imported Clusters - -For all types of imported Kubernetes clusters except for K3s Kubernetes clusters, Rancher doesn't have any information about how the cluster is provisioned or configured. - -Therefore, when Rancher imports a cluster, it assumes that several capabilities are disabled by default. 
Rancher assumes this in order to avoid exposing UI options to the user even when the capabilities are not enabled in the imported cluster. - -However, if the cluster has a certain capability, such as the ability to use a pod security policy, a user of that cluster might still want to select pod security policies for the cluster in the Rancher UI. In order to do that, the user will need to manually indicate to Rancher that pod security policies are enabled for the cluster. - -By annotating an imported cluster, it is possible to indicate to Rancher that a cluster was given a pod security policy, or another capability, outside of Rancher. - -This example annotation indicates that a pod security policy is enabled: - -```json -"capabilities.cattle.io/pspEnabled": "true" -``` - -The following annotation indicates Ingress capabilities. Note that the values of non-primitive objects need to be JSON encoded, with quotations escaped. - -```json -"capabilities.cattle.io/ingressCapabilities": "[{"customDefaultBackend":true,"ingressProvider":"asdf"}]" -``` - -These capabilities can be annotated for the cluster: - -- `ingressCapabilities` -- `loadBalancerCapabilities` -- `nodePoolScalingSupported` -- `nodePortRange` -- `pspEnabled` -- `taintSupport` - -All the capabilities and their type definitions can be viewed in the Rancher API view, at `[Rancher Server URL]/v3/schemas/capabilities`. - -To annotate an imported cluster, - -1. Go to the cluster view in Rancher and select **⋮ > Edit.** -1. Expand the **Labels & Annotations** section. -1. Click **Add Annotation.** -1. Add an annotation to the cluster with the format `capabilities/<capability>: <value>` where `<value>` is the cluster capability that will be overridden by the annotation. In this scenario, Rancher is not aware of any capabilities of the cluster until you add the annotation. -1. 
Click **Save.** - -**Result:** The annotation does not give the capabilities to the cluster, but it does indicate to Rancher that the cluster has those capabilities. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/eks/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/eks/_index.md deleted file mode 100644 index f587a973aad..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/eks/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Importing EKS Clusters -weight: 2 ---- \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/k3s/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/k3s/_index.md deleted file mode 100644 index de4669c4407..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/k3s/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Importing K3s Kubernetes Clusters -weight: 1 ---- \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/rancher-k8s/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/rancher-k8s/_index.md deleted file mode 100644 index 73dfb08b048..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/imported-clusters/rancher-k8s/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Importing Rancher Kubernetes Clusters -weight: 3 ---- \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/_index.md deleted file mode 100644 index a6c2c6e516f..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Checklist for Production-Ready Clusters -weight: 1 ---- - -In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your 
apps and services. - -For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements) - -This is a shortlist of best practices that we strongly recommend for all production clusters. - -For a full list of all the best practices that we recommend, refer to the [best practices section.]({{}}/rancher/v2.x/en/best-practices) - -### Node Requirements - -* Make sure your nodes fulfill all of the [node requirements,]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) including the port requirements. - -### Back up etcd - -* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. etcd is the location where the state of your cluster is stored, and losing etcd data means losing your cluster. Make sure you configure [etcd Recurring Snapshots]({{}}/rancher/v2.x/en/backups/backups/ha-backups/#option-a-recurring-snapshots) for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. - -### Cluster Architecture - -* Nodes should have one of the following role configurations: - * `etcd` - * `controlplane` - * `etcd` and `controlplane` - * `worker` (the `worker` role should not be used or added on nodes with the `etcd` or `controlplane` role) -* Have at least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. -* Assign two or more nodes the `controlplane` role for master component high availability. -* Assign two or more nodes the `worker` role for workload rescheduling upon node failure. 
- -For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles) - -For more information about the -number of nodes for each Kubernetes role, refer to the section on [recommended architecture.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/) - -### Logging and Monitoring - -* Configure alerts/notifiers for Kubernetes components (System Service). -* Configure logging for cluster analysis and post-mortems. - -### Reliability - -* Perform load tests on your cluster to verify that its hardware can support your workloads. - -### Networking - -* Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://coreos.com/etcd/docs/latest/tuning.html) allow etcd to run in most networks (except really high latency networks). -* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). 
diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/nodes-and-roles/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/nodes-and-roles/_index.md deleted file mode 100644 index 3722a97e451..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/nodes-and-roles/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Roles for Nodes in Kubernetes -weight: 1 ---- - -This section describes the roles for etcd nodes, controlplane nodes, and worker nodes in Kubernetes, and how the roles work together in a cluster. - -This diagram is applicable to Kubernetes clusters [launched with Rancher using RKE.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -![Cluster diagram]({{}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid - -# etcd - -Nodes with the `etcd` role run etcd, which is a consistent and highly available key value store used as Kubernetes’ backing store for all cluster data. etcd replicates the data to each node. - ->**Note:** Nodes with the `etcd` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. - -# controlplane - -Nodes with the `controlplane` role run the Kubernetes master components (excluding `etcd`, as it's a separate role). See [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) for a detailed list of components. - ->**Note:** Nodes with the `controlplane` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. - -### kube-apiserver - -The Kubernetes API server (`kube-apiserver`) scales horizontally. Each node with the role `controlplane` will be added to the NGINX proxy on the nodes with components that need to access the Kubernetes API server. This means that if a node becomes unreachable, the local NGINX proxy on the node will forward the request to another Kubernetes API server in the list. - -### kube-controller-manager - -The Kubernetes controller manager uses leader election using an endpoint in Kubernetes. One instance of the `kube-controller-manager` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -### kube-scheduler - -The Kubernetes scheduler uses leader election using an endpoint in Kubernetes. One instance of the `kube-scheduler` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. 
Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -# worker - -Nodes with the `worker` role run the Kubernetes node components. See [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for a detailed list of components. - -# References - -* [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/recommended-architecture/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/recommended-architecture/_index.md deleted file mode 100644 index 8dd5a53dfde..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/production-checklist/recommended-architecture/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Recommended Cluster Architecture -weight: 1 ---- - -There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. - -# Separating Worker Nodes from Nodes with Other Roles - -When designing your cluster(s), you have two options: - -* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. It also strictly isolates network traffic between each of the roles according to the [port requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/). -* Assign the `etcd` and `controlplane` roles to the same nodes. These nodes must meet the hardware requirements for both roles. - -In either case, the `worker` role should not be used or added to nodes with the `etcd` or `controlplane` role. 
- -Therefore, each node should have one of the following role configurations: - - * `etcd` - * `controlplane` - * Both `etcd` and `controlplane` - * `worker` - -# Recommended Number of Nodes with Each Role - -The cluster should have: - -- At least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. -- At least two nodes with the role `controlplane` for master component high availability. -- At least two nodes with the role `worker` for workload rescheduling upon node failure. - -For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles) - - -### Number of Controlplane Nodes - -Adding more than one node with the `controlplane` role makes every master component highly available. - -### Number of etcd Nodes - -The number of nodes that you can lose at once while maintaining cluster availability is determined by the number of nodes assigned the `etcd` role. For a cluster with n members, the minimum is (n/2)+1. Therefore, we recommend creating an `etcd` node in 3 different availability zones within a region to survive the loss of one availability zone. If you use only two zones, you can only survive the loss of the zone where you don't lose the majority of nodes. 
- -| Nodes with `etcd` role | Majority | Failure Tolerance | -|--------------|------------|-------------------| -| 1 | 1 | 0 | -| 2 | 2 | 0 | -| 3 | 2 | **1** | -| 4 | 3 | 1 | -| 5 | 3 | **2** | -| 6 | 4 | 2 | -| 7 | 4 | **3** | -| 8 | 5 | 3 | -| 9 | 5 | **4** | - -References: - -* [Official etcd documentation on optimal etcd cluster size](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance) -* [Official Kubernetes documentation on operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) - -### Number of Worker Nodes - -Adding more than one node with the `worker` role will make sure your workloads can be rescheduled if a node fails. - -### Why Production Requirements are Different for the Rancher Cluster and the Clusters Running Your Applications - -You may have noticed that our [Kubernetes Install]({{}}/rancher/v2.x/en/installation/k8s-install/) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three node cluster is valid, because: - -* It allows one `etcd` node failure. -* It maintains multiple instances of the master components by having multiple `controlplane` nodes. -* No other workloads than Rancher itself should be created on this cluster. - -# References - -* [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/_index.md deleted file mode 100644 index 5f581d02208..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Launching Kubernetes with Rancher -weight: 3 ---- - -You can have Rancher launch a Kubernetes cluster using any nodes you want. 
When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. It can launch Kubernetes on any computers, including: - -- Bare-metal servers -- On-premise virtual machines -- Virtual machines hosted by an infrastructure provider - -Rancher can install Kubernetes on existing nodes, or it can dynamically provision nodes in an infrastructure provider and install Kubernetes on them. - -RKE clusters include clusters that Rancher launched on Windows nodes or other existing custom nodes, as well as clusters that Rancher launched with new nodes on Azure, Digital Ocean, EC2, or vSphere. - -### Requirements - -If you use RKE to set up a cluster, your nodes must meet the [requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements) for nodes in downstream user clusters. - -### Launching Kubernetes on New Nodes in an Infrastructure Provider - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your cloud providers. - -One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. - -For more information, refer to the section on [launching Kubernetes on new nodes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) - -### Launching Kubernetes on Existing Custom Nodes - -In this scenario, you want to install Kubernetes on bare-metal servers, on-premise virtual machines, or virtual machines that already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. 
- -If you want to reuse a node from a previous custom cluster, [clean the node]({{}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. - -For more information, refer to the section on [custom nodes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/cluster-config-reference/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/cluster-config-reference/_index.md deleted file mode 100644 index e899bcb10db..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/cluster-config-reference/_index.md +++ /dev/null @@ -1,386 +0,0 @@ ---- -title: Cluster Configuration Reference -weight: 5 ---- - -As you configure a new cluster that's [provisioned using RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), you can choose custom Kubernetes options. - -You can configure Kubernetes options one of two ways: - -- [Rancher UI](#rancher-ui): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. -- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - -In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. 
For more information, see the section about the [cluster config file.](#cluster-config-file) - -This section is a cluster configuration reference, covering the following topics: - -- [Rancher UI Options](#rancher-ui-options) - - [Kubernetes version](#kubernetes-version) - - [Network provider](#network-provider) - - [Kubernetes cloud providers](#kubernetes-cloud-providers) - - [Private registries](#private-registries) - - [Authorized cluster endpoint](#authorized-cluster-endpoint) -- [Advanced Options](#advanced-options) - - [NGINX Ingress](#nginx-ingress) - - [Node port range](#node-port-range) - - [Metrics server monitoring](#metrics-server-monitoring) - - [Pod security policy support](#pod-security-policy-support) - - [Docker version on nodes](#docker-version-on-nodes) - - [Docker root directory](#docker-root-directory) - - [Recurring etcd snapshots](#recurring-etcd-snapshots) -- [Cluster config file](#cluster-config-file) - - [Config file structure in Rancher v2.3.0+](#config-file-structure-in-rancher-v2-3-0+) - - [Config file structure in Rancher v2.0.0-v2.2.x](#config-file-structure-in-rancher-v2-0-0-v2-2-x) - - [Default DNS provider](#default-dns-provider) -- [Rancher specific parameters](#rancher-specific-parameters) - -# Rancher UI Options - -When creating a cluster using one of the options described in [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), you can configure basic Kubernetes options using the **Cluster Options** section. - -### Kubernetes Version - -The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). - -### Network Provider - -The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ]({{}}/rancher/v2.x/en/faq/networking/cni-providers/). 
- ->**Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you to tear down the entire cluster and all its applications. - -Out of the box, Rancher is compatible with the following network providers: - -- [Canal](https://github.com/projectcalico/canal) -- [Flannel](https://github.com/coreos/flannel#flannel) -- [Calico](https://docs.projectcalico.org/v3.11/introduction/) -- [Weave](https://github.com/weaveworks/weave) (Available as of v2.2.0) - -**Notes on Canal:** - -In v2.0.0 - v2.0.4 and v2.0.6, the default option for these clusters was Canal with network isolation. With the network isolation automatically enabled, it prevented any pod communication between [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). - -As of v2.0.7, if you use Canal, you also have the option of using **Project Network Isolation**, which will enable or disable communication between pods in different [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). - ->**Attention Rancher v2.0.0 - v2.0.6 Users** -> ->- In previous Rancher releases, Canal isolates project network communications with no option to disable it. If you are using any of these Rancher releases, be aware that using Canal prevents all communication between pods in different projects. ->- If you have clusters using Canal and are upgrading to v2.0.7, those clusters enable Project Network Isolation by default. If you want to disable Project Network Isolation, edit the cluster and disable the option. - -**Notes on Flannel:** - -In v2.0.5, this was the default option, which did not prevent any network isolation between projects. 
- -**Notes on Weave:** - -When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) and the [Weave Network Plug-in Options]({{}}/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). - -### Kubernetes Cloud Providers - -You can configure a [Kubernetes cloud provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers). If you want to use [volumes and storage]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. - ->**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation]({{}}/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. - -If you want to see all the configuration options for a cluster, please click **Show advanced options** on the bottom right. The advanced options are described below: - -### Private registries - -The cluster-level private registry configuration is only used for provisioning clusters. - -There are two main ways to set up private registries in Rancher: by setting up the [global default registry]({{}}/rancher/v2.x/en/admin-settings/config-private-registry) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. 
The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. - -If your private registry requires credentials, you need to pass the credentials to Rancher by editing the cluster options for each cluster that needs to pull images from the registry. - -The private registry configuration option tells Rancher where to pull the [system images]({{}}/rke/latest/en/config-options/system-images/) or [addon images]({{}}/rke/latest/en/config-options/add-ons/) that will be used in your cluster. - -- **System images** are components needed to maintain the Kubernetes cluster. -- **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. - -See the [RKE documentation on private registries]({{}}/rke/latest/en/config-options/private-registries/) for more information on the private registry for components applied during the provisioning of the cluster. - -### Authorized Cluster Endpoint - -Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. - -> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE]({{}}/rancher/v2.x/en/overview/architecture/#tools-for-provisioning-kubernetes-clusters) to provision the cluster. It is not available for clusters in a hosted Kubernetes provider, such as Amazon's EKS. - -This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. - -For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) - -We recommend using a load balancer with the authorized cluster endpoint. 
For details, refer to the [recommended architecture section.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) - -# Advanced Options - -The following options are available when you create clusters in the Rancher UI. They are located under **Advanced Options.** - -### NGINX Ingress - -Option to enable or disable the [NGINX ingress controller]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/). - -### Node Port Range - -Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). Default is `30000-32767`. - -### Metrics Server Monitoring - -Option to enable or disable [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/). - -### Pod Security Policy Support - -Option to enable and select a default [Pod Security Policy]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies). You must have an existing Pod Security Policy configured before you can use this option. - -### Docker Version on Nodes - -Option to require [a supported Docker version]({{}}/rancher/v2.x/en/installation/requirements/) installed on the cluster nodes that are added to the cluster, or to allow unsupported Docker versions installed on the cluster nodes. - -### Docker Root Directory - -If the nodes you are adding to the cluster have Docker configured with a non-default Docker Root Directory (default is `/var/lib/docker`), please specify the correct Docker Root Directory in this option. - -### Recurring etcd Snapshots - -Option to enable or disable [recurring etcd snapshots]({{}}/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). - -# Cluster Config File - -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. 
Using a config file allows you to set any of the [options available]({{}}/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. - ->**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. - -- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. -- To read from an existing RKE file, click **Read from a file**. - -![image]({{}}/img/rancher/cluster-options-yaml.png) - -The structure of the config file is different depending on your version of Rancher. Below are example config files for Rancher v2.0.0-v2.2.x and for Rancher v2.3.0+. - -### Config File Structure in Rancher v2.3.0+ - -RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,]({{}}/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. - -{{% accordion id="v2.3.0-cluster-config-file" label="Example Cluster Config File for Rancher v2.3.0+" %}} - -```yaml -# -# Cluster Config -# -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: false -local_cluster_auth_endpoint: - enabled: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: # Your RKE template config goes here. 
- addon_job_timeout: 30 - authentication: - strategy: x509 - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. -# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.15.3-rancher3-1 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 0 - retention: 72h - snapshot: false - uid: 0 - kube_api: - always_pull_images: false - pod_security_policy: false - service_node_port_range: 30000-32767 - ssh_agent_auth: false -windows_prefered_cluster: false -``` -{{% /accordion %}} - -### Config File Structure in Rancher v2.0.0-v2.2.x - -An example cluster config file is included below. - -{{% accordion id="prior-to-v2.3.0-cluster-config-file" label="Example Cluster Config File for Rancher v2.0.0-v2.2.x" %}} -```yaml -addon_job_timeout: 30 -authentication: - strategy: x509 -ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. 
-# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# -ingress: - provider: nginx -kubernetes_version: v1.15.3-rancher3-1 -monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# -network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# -services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 0 - retention: 72h - snapshot: false - uid: 0 - kube_api: - always_pull_images: false - pod_security_policy: false - service_node_port_range: 30000-32767 -ssh_agent_auth: false -``` -{{% /accordion %}} - -### Default DNS provider - -The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider]({{}}/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. 
- -| Rancher version | Kubernetes version | Default DNS provider | -|-------------|--------------------|----------------------| -| v2.2.5 and higher | v1.14.0 and higher | CoreDNS | -| v2.2.5 and higher | v1.13.x and lower | kube-dns | -| v2.2.4 and lower | any | kube-dns | - -# Rancher specific parameters - -Besides the RKE config file options, there are also Rancher specific settings that can be configured in the Config File (YAML): - -### docker_root_dir - -See [Docker Root Directory](#docker-root-directory). - -### enable_cluster_monitoring - -Option to enable or disable [Cluster Monitoring]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/). - -### enable_network_policy - -Option to enable or disable Project Network Isolation. - -### local_cluster_auth_endpoint - -See [Authorized Cluster Endpoint](#authorized-cluster-endpoint). - -Example: - -```yaml -local_cluster_auth_endpoint: - enabled: true - fqdn: "FQDN" - ca_certs: "BASE64_CACERT" -``` - -### Custom Network Plug-in - -You can add a custom network plug-in by using the [user-defined add-on functionality]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. 
- -There are two ways that you can specify an add-on: - -- [In-line Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) -- [Referencing YAML Files for Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) - -For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.]({{}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/cluster-config-reference/pod-security-policies/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/cluster-config-reference/pod-security-policies/_index.md deleted file mode 100644 index 52f54070a84..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/cluster-config-reference/pod-security-policies/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Assigning Pod Security Policies -weight: 1 ---- - -_Pod Security Policies_ are objects that control security-sensitive aspects of pod specification (like root privileges). - -## Adding a Default Pod Security Policy - -When you create a new cluster with RKE, you can configure it to apply a PSP immediately. As you create the cluster, use the **Cluster Options** to enable a PSP. The PSP assigned to the cluster will be the default PSP for projects within the cluster. - ->**Prerequisite:** ->Create a Pod Security Policy within Rancher. Before you can assign a default PSP to a new cluster, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/). ->**Note:** ->For security purposes, we recommend assigning a PSP as you create your clusters. 
- -To enable a default Pod Security Policy, set the **Pod Security Policy Support** option to **Enabled**, and then make a selection from the **Default Pod Security Policy** drop-down. - -When the cluster finishes provisioning, the PSP you selected is applied to all projects within the cluster. diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/custom-nodes/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/custom-nodes/_index.md deleted file mode 100644 index 59ccd70fa8d..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/custom-nodes/_index.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Launching Kubernetes on Existing Custom Nodes -description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements -metaDescription: "To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements" -weight: 3 ---- - -When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-premise bare-metal servers, on-premise virtual machines, or in any node hosted by an infrastructure provider. - -To use this option you'll need access to servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements), which includes some hardware specifications and Docker. After you install Docker on each server, run the command provided in the Rancher UI to turn each server into a Kubernetes node. - -This section describes how to set up a custom cluster. - -# Creating a Cluster with Custom Nodes - ->**Want to use Windows hosts as Kubernetes workers?** -> ->See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) before you start. - - - -- [1. 
Provision a Linux Host](#1-provision-a-linux-host) -- [2. Create the Custom Cluster](#2-create-the-custom-cluster) -- [3. Amazon Only: Tag Resources](#3-amazon-only-tag-resources) - - - -### 1. Provision a Linux Host - -Begin creation of a custom cluster by provisioning a Linux host. Your host can be: - -- A cloud-host virtual machine (VM) -- An on-premise VM -- A bare-metal server - -If you want to reuse a node from a previous custom cluster, [clean the node]({{}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. - -Provision the host according to the [installation requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements) and the [checklist for production-ready clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/production) - -### 2. Create the Custom Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Custom**. - -3. Enter a **Cluster Name**. - -4. {{< step_create-cluster_member-roles >}} - -5. {{< step_create-cluster_cluster-options >}} - - >**Using Windows nodes as Kubernetes workers?** - > - >- See [Enable the Windows Support Option]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#enable-the-windows-support-option). - >- The only Network Provider available for clusters with Windows support is Flannel. See [Networking Option]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#networking-option). -6. Click **Next**. - -7. From **Node Role**, choose the roles that you want filled by a cluster node. - - >**Notes:** - > - >- Using Windows nodes as Kubernetes workers? See [Node Configuration]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#node-configuration). - >- Bare-Metal Server Reminder: If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. 
provision multiple bare-metal servers). - -8. **Optional**: Click **[Show advanced options]({{}}/rancher/v2.x/en/admin-settings/agent-options/)** to specify IP address(es) to use when registering the node, override the hostname of the node, or to add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node. - -9. Copy the command displayed on screen to your clipboard. - -10. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. Run the command copied to your clipboard. - - >**Note:** Repeat steps 7-10 if you want to dedicate specific hosts to specific node roles. Repeat the steps as many times as needed. - -11. When you finish running the command(s) on your Linux host(s), click **Done**. - -{{< result_create-cluster >}} - -### 3. Amazon Only: Tag Resources - -If you have configured your cluster to use Amazon as **Cloud Provider**, tag your AWS resources with a cluster ID. - -[Amazon Documentation: Tagging Your Amazon EC2 Resources](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) - ->**Note:** You can use Amazon EC2 instances without configuring a cloud provider in Kubernetes. You only have to configure the cloud provider if you want to use specific Kubernetes cloud provider functionality. For more information, see [Kubernetes Cloud Providers](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) - - -The following resources need to be tagged with a `ClusterID`: - -- **Nodes**: All hosts added in Rancher. -- **Subnet**: The subnet used for your cluster -- **Security Group**: The security group used for your cluster. - - >**Note:** Do not tag multiple security groups. Tagging multiple groups generates an error when creating Elastic Load Balancer. 
- -The tag that should be used is: - -``` -Key=kubernetes.io/cluster/, Value=owned -``` - -`` can be any string you choose. However, the same string must be used on every resource you tag. Setting the tag value to `owned` informs the cluster that all resources tagged with the `` are owned and managed by this cluster. - -If you share resources between clusters, you can change the tag to: - -``` -Key=kubernetes.io/cluster/CLUSTERID, Value=shared -``` - -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/custom-nodes/agent-options/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/custom-nodes/agent-options/_index.md deleted file mode 100644 index 769bd5e0d01..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/custom-nodes/agent-options/_index.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Rancher Agent Options -weight: 2500 ---- - -Rancher deploys an agent on each node to communicate with the node. This page describes the options that can be passed to the agent. To use these options, you will need to [create a cluster with custom nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes) and add the options to the generated `docker run` command when adding a node. - -For an overview of how Rancher communicates with downstream clusters using node agents, refer to the [architecture section.]({{}}/rancher/v2.x/en/overview/architecture/#3-node-agents) - -## General options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--server` | `CATTLE_SERVER` | The configured Rancher `server-url` setting which the agent connects to | -| `--token` | `CATTLE_TOKEN` | Token that is needed to register the node in Rancher | -| `--ca-checksum` | `CATTLE_CA_CHECKSUM` | The SHA256 checksum of the configured Rancher `cacerts` setting to validate | -| `--node-name` | `CATTLE_NODE_NAME` | Override the hostname that is used to register the node (defaults to `hostname -s`) | -| `--label` | `CATTLE_NODE_LABEL` | Add node labels to the node. For multiple labels, pass additional `--label` options. (`--label key=value`) | -| `--taints` | `CATTLE_NODE_TAINTS` | Add node taints to the node. For multiple taints, pass additional `--taints` options. 
(`--taints key=value:effect`) | - -## Role options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--all-roles` | `ALL=true` | Apply all roles (`etcd`,`controlplane`,`worker`) to the node | -| `--etcd` | `ETCD=true` | Apply the role `etcd` to the node | -| `--controlplane` | `CONTROL=true` | Apply the role `controlplane` to the node | -| `--worker` | `WORKER=true` | Apply the role `worker` to the node | - -## IP address options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--address` | `CATTLE_ADDRESS` | The IP address the node will be registered with (defaults to the IP used to reach `8.8.8.8`) | -| `--internal-address` | `CATTLE_INTERNAL_ADDRESS` | The IP address used for inter-host communication on a private network | - -### Dynamic IP address options - -For automation purposes, you can't have a specific IP address in a command as it has to be generic to be used for every node. For this, we have dynamic IP address options. They are used as a value to the existing IP address options. This is supported for `--address` and `--internal-address`. 
- -| Value | Example | Description | -| ---------- | -------------------- | ----------- | -| Interface name | `--address eth0` | The first configured IP address will be retrieved from the given interface | -| `ipify` | `--address ipify` | Value retrieved from `https://api.ipify.org` will be used | -| `awslocal` | `--address awslocal` | Value retrieved from `http://169.254.169.254/latest/meta-data/local-ipv4` will be used | -| `awspublic` | `--address awspublic` | Value retrieved from `http://169.254.169.254/latest/meta-data/public-ipv4` will be used | -| `doprivate` | `--address doprivate` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address` will be used | -| `dopublic` | `--address dopublic` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address` will be used | -| `azprivate` | `--address azprivate` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2017-08-01&format=text` will be used | -| `azpublic` | `--address azpublic` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-08-01&format=text` will be used | -| `gceinternal` | `--address gceinternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip` will be used | -| `gceexternal` | `--address gceexternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip` will be used | -| `packetlocal` | `--address packetlocal` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/local-ipv4` will be used | -| `packetpublic` | `--address packetpublic` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/public-ipv4` will be used | diff --git 
a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/_index.md deleted file mode 100644 index e832003971f..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/_index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Launching Kubernetes on New Nodes in an Infrastructure Provider -weight: 2 ---- - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. - -One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. - -The available cloud providers to create a node template are decided based on active [node drivers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers). - -This section covers the following topics: - -- [Node templates](#node-templates) - - [Node labels](#node-labels) - - [Node taints](#node-taints) - - [Administrator control of node templates](#administrator-control-of-node-templates) -- [Node pools](#node-pools) - - [Node pool taints](#node-pool-taints) - - [About node auto-replace](#about-node-auto-replace) - - [Enabling node auto-replace](#enabling-node-auto-replace) - - [Disabling node auto-replace](#disabling-node-auto-replace) -- [Cloud credentials](#cloud-credentials) -- [Node drivers](#node-drivers) - -# Node Templates - -A node template is the saved configuration for the parameters to use when provisioning nodes in a specific cloud provider. These nodes can be launched from the UI. 
Rancher uses [Docker Machine](https://docs.docker.com/machine/) to provision these nodes. The available cloud providers to create node templates are based on the active node drivers in Rancher. - -After you create a node template in Rancher, it's saved so that you can use this template again to create node pools. Node templates are bound to your login. After you add a template, you can remove them from your user profile. - -### Node Labels - -You can add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) on each node template, so that any nodes created from the node template will automatically have these labels on them. - -### Node Taints - -You can add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on each node template, so that any nodes created from the node template will automatically have these taints on them. - -Since taints can be added at a node template and node pool, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effect, the taints from the node pool will override the taints from the node template. - -### Administrator Control of Node Templates - -Administrators can control all node templates. Admins can now maintain all the node templates within Rancher. When a node template owner is no longer using Rancher, the node templates created by them can be managed by administrators so the cluster can continue to be updated and maintained. - -To access all node templates, an administrator will need to do the following: - -1. In the Rancher UI, click the user profile icon in the upper right corner. -1. Click **Node Templates.** - -**Result:** All node templates are listed and grouped by owner. The templates can be edited or cloned by clicking the **⋮.** - -# Node Pools - -Using Rancher, you can create pools of nodes based on a [node template](#node-templates). 
The benefit of using a node pool is that if a node is destroyed or deleted, you can increase the number of live nodes to compensate for the node that was lost. The node pool helps you ensure that the count of the node pool is as expected. - -Each node pool is assigned with a [node component]({{}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) to specify how these nodes should be configured for the Kubernetes cluster. - -### Node Pool Taints - -If you haven't defined [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on your node template, you can add taints for each node pool. The benefit of adding taints at a node pool is beneficial over adding it at a node template is that you can swap out the node templates without worrying if the taint is on the node template. - -For each taint, they will automatically be added to any created node in the node pool. Therefore, if you add taints to a node pool that have existing nodes, the taints won't apply to existing nodes in the node pool, but any new node added into the node pool will get the taint. - -When there are taints on the node pool and node template, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effect, the taints from the node pool will override the taints from the node template. - -### About Node Auto-replace - -If a node is in a node pool, Rancher can automatically replace unreachable nodes. Rancher will use the existing node template for the given node pool to recreate the node if it becomes inactive for a specified number of minutes. - -> **Important:** Self-healing node pools are designed to help you replace worker nodes for **stateless** applications. It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. 
When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. - -{{% accordion id="how-does-node-auto-replace-work" label="How does Node Auto-replace Work?" %}} - Node auto-replace works on top of the Kubernetes node controller. The node controller periodically checks the status of all the nodes (configurable via the `--node-monitor-period` flag of the `kube-controller`). When a node is unreachable, the node controller will taint that node. When this occurs, Rancher will begin its deletion countdown. You can configure the amount of time Rancher waits to delete the node. If the taint is not removed before the deletion countdown ends, Rancher will proceed to delete the node object. Rancher will then provision a node in accordance with the set quantity of the node pool. -{{% /accordion %}} - -### Enabling Node Auto-replace - -When you create the node pool, you can specify the amount of time in minutes that Rancher will wait to replace an unresponsive node. - -1. In the form for creating a cluster, go to the **Node Pools** section. -1. Go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. -1. Fill out the rest of the form for creating a cluster. - -**Result:** Node auto-replace is enabled for the node pool. - -You can also enable node auto-replace after the cluster is created with the following steps: - -1. From the Global view, click the Clusters tab. -1. Go to the cluster where you want to enable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** -1. In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. -1. 
Click **Save.** - -**Result:** Node auto-replace is enabled for the node pool. - -### Disabling Node Auto-replace - -You can disable node auto-replace from the Rancher UI with the following steps: - -1. From the Global view, click the Clusters tab. -1. Go to the cluster where you want to enable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** -1. In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter 0. -1. Click **Save.** - -**Result:** Node auto-replace is disabled for the node pool. - -# Cloud Credentials - -Node templates can use cloud credentials to store credentials for launching nodes in your cloud provider, which has some benefits: - -- Credentials are stored as a Kubernetes secret, which is not only more secure, but it also allows you to edit a node template without having to enter your credentials every time. - -- After the cloud credential is created, it can be re-used to create additional node templates. - -- Multiple node templates can share the same cloud credential to create node pools. If your key is compromised or expired, the cloud credential can be updated in a single place, which allows all node templates that are using it to be updated at once. - -> **Note:** As of v2.2.0, the default `active` [node drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use cloud credentials. If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. - -After cloud credentials are created, the user can start [managing the cloud credentials that they created]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/). 
- -# Node Drivers - -If you don't find the node driver that you want to use, you can see if it is available in Rancher's built-in [node drivers and activate it]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/#activating-deactivating-node-drivers), or you can [add your own custom node driver]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/#adding-custom-node-drivers). diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/azure/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/azure/_index.md deleted file mode 100644 index a74cbf61504..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/azure/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Creating an Azure Cluster -shortTitle: Azure -weight: 2220 ---- - -Use {{< product >}} to create a Kubernetes cluster in Azure. - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Azure**. - -3. Enter a **Cluster Name**. - -4. {{< step_create-cluster_member-roles >}} - -5. {{< step_create-cluster_cluster-options >}} - -6. {{< step_create-cluster_node-pools >}} - - 1. Click **Add Node Template**. - - 2. Complete the **Azure Options** form. - - - **Account Access** stores your account information for authenticating with Azure. Note: As of v2.2.0, account access information is stored as a cloud credentials. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. To create a new cloud credential, enter **Name** and **Account Access** data, then click **Create.** - - - **Placement** sets the geographical region where your cluster is hosted and other location metadata. - - - **Network** configures the networking used in your cluster. - - - **Instance** customizes your VM configuration. - - 3. {{< step_rancher-template >}} - - 4. Click **Create**. - - 5. 
**Optional:** Add additional node pools. -
-7. Review your options to confirm they're correct. Then click **Create**. - -{{< result_create-cluster >}} - -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/digital-ocean/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/digital-ocean/_index.md deleted file mode 100644 index 36c912de8ce..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/digital-ocean/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Creating a DigitalOcean Cluster -shortTitle: DigitalOcean -weight: 2215 ---- -Use {{< product >}} to create a Kubernetes cluster using DigitalOcean. - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **DigitalOcean**. - -3. Enter a **Cluster Name**. - -4. {{< step_create-cluster_member-roles >}} - -5. 
{{< step_create-cluster_cluster-options >}} - -6. {{< step_create-cluster_node-pools >}} - - 1. Click **Add Node Template**. Note: As of v2.2.0, account access information is stored as a cloud credentials. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. To create a new cloud credential, enter **Name** and **Account Access** data, then click **Create.** - - 2. Complete the **Digital Ocean Options** form. - - - **Access Token** stores your DigitalOcean Personal Access Token. Refer to [DigitalOcean Instructions: How To Generate a Personal Access Token](https://www.digitalocean.com/community/tutorials/how-to-use-the-digitalocean-api-v2#how-to-generate-a-personal-access-token). - - - **Droplet Options** provision your cluster's geographical region and specifications. - - 4. {{< step_rancher-template >}} - - 5. Click **Create**. - - 6. **Optional:** Add additional node pools. -
-7. Review your options to confirm they're correct. Then click **Create**. - -{{< result_create-cluster >}} - -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/ec2/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/ec2/_index.md deleted file mode 100644 index 4754951afde..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/ec2/_index.md +++ /dev/null @@ -1,266 +0,0 @@ ---- -title: Creating an Amazon EC2 Cluster -shortTitle: Amazon EC2 -description: Learn the prerequisites and steps required in order for you to create an Amazon EC2 cluster using Rancher -weight: 2210 ---- -Use Rancher to create a Kubernetes cluster in Amazon EC2. - -### Prerequisites - -- **AWS EC2 Access Key and Secret Key** that will be used to create the instances. 
See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. -- **IAM Policy created** to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our three example JSON policies below: - - [Example IAM Policy](#example-iam-policy) - - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) - - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) -- **IAM Policy added as Permission** to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. - -# Creating an EC2 Cluster - -The steps to create a cluster differ based on your Rancher version. - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials and information from EC2](#2-create-a-node-template-with-your-cloud-credentials-and-information-from-ec2) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **Amazon.** -1. 
In the **Region** field, select the AWS region where your cluster nodes will be located. -1. Enter your AWS EC2 **Access Key** and **Secret Key.** -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials and information from EC2 -Complete each of the following forms using information available from the [EC2 Management Console](https://aws.amazon.com/ec2). - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. In the **Region** field, select the same region that you used when creating your cloud credentials. -1. In the **Cloud Credentials** field, select your newly created cloud credentials. -1. Click **Next: Authenticate & configure nodes.** -1. Choose an availability zone and network settings for your cluster. Click **Next: Select a Security Group.** -1. Choose the default security group or configure a security group. Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#security-group-for-nodes-on-aws-ec2) to see what rules are created in the `rancher-nodes` Security Group. Then click **Next: Set Instance options.** -1. Configure the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI. - -> If you need to pass an IAM Instance Profile Name (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. 
- -Optional: In the **Engine Options** section of the node template, you can configure the Docker daemon. You may want to specify the docker version or a Docker registry mirror. - -### 3. Create a cluster with node pools using the node template - -{{< step_create-cluster_node-pools >}} - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Amazon EC2**. - -1. Enter a **Cluster Name**. - -1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. - -1. Click **Add Member** to add users that can access the cluster. - -1. Use the **Role** drop-down to set permissions for each user. - -1. Use **Cluster Options** to choose the version of Kubernetes, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. - -1. Click **Create**. - -{{< result_create-cluster >}} -{{% /tab %}} -{{% tab "Rancher prior to v2.2.0+" %}} - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Amazon EC2**. - -1. Enter a **Cluster Name**. - -1. {{< step_create-cluster_member-roles >}} - -1. {{< step_create-cluster_cluster-options >}}Refer to [Selecting Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. - -1. {{< step_create-cluster_node-pools >}} - - 1. Click **Add Node Template**. - - 1. Complete each of the following forms using information available from the [EC2 Management Console](https://aws.amazon.com/ec2). - - - **Account Access** is where you configure the region of the nodes, and the credentials (Access Key and Secret Key) used to create the machine. See [Prerequisites](#prerequisites) how to create the Access Key and Secret Key and the needed permissions. 
- - **Zone and Network** configures the availability zone and network settings for your cluster. - - **Security Groups** creates or configures the Security Groups applied to your nodes. Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#security-group-for-nodes-on-aws-ec2) to see what rules are created in the `rancher-nodes` Security Group. - - **Instance** configures the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI. -

- If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. - -1. {{< step_rancher-template >}} -1. Click **Create**. -1. **Optional:** Add additional node pools. -1. Review your cluster settings to confirm they are correct. Then click **Create**. - -{{< result_create-cluster >}} -{{% /tab %}} -{{% /tabs %}} - -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
- -### Example IAM Policy - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:Describe*", - "ec2:ImportKeyPair", - "ec2:CreateKeyPair", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteKeyPair" - ], - "Resource": "*" - }, - { - "Sid": "VisualEditor1", - "Effect": "Allow", - "Action": [ - "ec2:RunInstances" - ], - "Resource": [ - "arn:aws:ec2:REGION::image/ami-*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*" - ] - }, - { - "Sid": "VisualEditor2", - "Effect": "Allow", - "Action": [ - "ec2:RebootInstances", - "ec2:TerminateInstances", - "ec2:StartInstances", - "ec2:StopInstances" - ], - "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" - } - ] -} -``` - -### Example IAM Policy with PassRole - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:Describe*", - "ec2:ImportKeyPair", - "ec2:CreateKeyPair", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteKeyPair" - ], - "Resource": "*" - }, - { - "Sid": "VisualEditor1", - "Effect": "Allow", - "Action": [ - "iam:PassRole", - "ec2:RunInstances" - ], - "Resource": [ - "arn:aws:ec2:REGION::image/ami-*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", - 
"arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*", - "arn:aws:iam::AWS_ACCOUNT_ID:role/YOUR_ROLE_NAME" - ] - }, - { - "Sid": "VisualEditor2", - "Effect": "Allow", - "Action": [ - "ec2:RebootInstances", - "ec2:TerminateInstances", - "ec2:StartInstances", - "ec2:StopInstances" - ], - "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" - } - ] -} -``` -### Example IAM Policy to allow encrypted EBS volumes -``` json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "kms:Decrypt", - "kms:GenerateDataKeyWithoutPlaintext", - "kms:Encrypt", - "kms:DescribeKey", - "kms:CreateGrant", - "ec2:DetachVolume", - "ec2:AttachVolume", - "ec2:DeleteSnapshot", - "ec2:DeleteTags", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:DeleteVolume", - "ec2:CreateSnapshot" - ], - "Resource": [ - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:snapshot/*", - "arn:aws:kms:REGION:AWS_ACCOUNT_ID:key/KMS_KEY_ID" - ] - }, - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeSnapshots" - ], - "Resource": "*" - } - ] -} -``` diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/_index.md deleted file mode 100644 index dbc0be0f28e..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Creating a vSphere Cluster -shortTitle: vSphere -description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. -metaDescription: Use Rancher to create a vSphere cluster. 
It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. -weight: 2225 ---- - -By using Rancher with vSphere, you can bring cloud operations on-premises. - -Rancher can provision nodes in vSphere and install Kubernetes on them. When creating a Kubernetes cluster in vSphere, Rancher first provisions the specified number of virtual machines by communicating with the vCenter API. Then it installs Kubernetes on top of them. - -A vSphere cluster may consist of multiple groups of VMs with distinct properties, such as the amount of memory or the number of vCPUs. This grouping allows for fine-grained control over the sizing of nodes for each Kubernetes role. - -# vSphere Enhancements - -The vSphere node templates have been updated, allowing you to bring cloud operations on-premises with the following enhancements: - -### Self-healing Node Pools - -One of the biggest advantages of provisioning vSphere nodes with Rancher is that it allows you to take advantage of Rancher's self-healing node pools, also called the [node auto-replace feature,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-auto-replace) in your on-premises clusters. Self-healing node pools are designed to help you replace worker nodes for stateless applications. When Rancher provisions nodes from a node template, Rancher can automatically replace unreachable nodes. - -> **Important:** It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. 
- -### Dynamically Populated Options for Instances and Scheduling - -Node templates for vSphere have been updated so that when you create a node template with your vSphere credentials, the template is automatically populated with the same options for provisioning VMs that you have access to in the vSphere console. - -For the fields to be populated, your setup needs to fulfill the [prerequisites.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#prerequisites) - -### More Supported Operating Systems - -In Rancher v2.3.3+, you can provision VMs with any operating system that supports `cloud-init`. Only YAML format is supported for the [cloud config.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) - -In Rancher prior to v2.3.3, the vSphere node driver included in Rancher only supported the provisioning of VMs with [RancherOS]({{}}/os/v1.x/en/) as the guest operating system. - -# Video Walkthrough of v2.3.3 Node Template Features - -In this YouTube video, we demonstrate how to set up a node template with the new features designed to help you bring cloud operations to on-premises clusters. - -{{< youtube id="dPIwg6x1AlU">}} diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/_index.md deleted file mode 100644 index ca0b30b396b..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/_index.md +++ /dev/null @@ -1,317 +0,0 @@ ---- -title: Provisioning Kubernetes Clusters in vSphere -weight: 1 ---- - -This section explains how to configure Rancher with vSphere credentials, provision nodes in vSphere, and set up Kubernetes clusters on those nodes. 
- -# Prerequisites - -This section describes the requirements for setting up vSphere so that Rancher can provision VMs and clusters. - -The node templates are documented and tested with the vSphere Web Services API version 6.5. - -- [Create credentials in vSphere](#create-credentials-in-vsphere) -- [Network permissions](#network-permissions) -- [Valid ESXi License for vSphere API Access](#valid-esxi-license-for-vsphere-api-access) - -### Create Credentials in vSphere - -Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. When you set up a node template, the template will need to use these vSphere credentials. - -Refer to this [how-to guide]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials) for instructions on how to create a user in vSphere with the required permissions. These steps result in a username and password that you will need to provide to Rancher, which allows Rancher to provision resources in vSphere. - -### Network Permissions - -It must be ensured that the hosts running the Rancher server are able to establish the following network connections: - -- To the vSphere API on the vCenter server (usually port 443/TCP). -- To the Host API (port 443/TCP) on all ESXi hosts used to instantiate virtual machines for the clusters (*only required with Rancher prior to v2.3.3 or when using the ISO creation method in later versions*). -- To port 22/TCP and 2376/TCP on the created VMs - -See [Node Networking Requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements) for a detailed list of port requirements applicable for creating nodes on an infrastructure provider. - -### Valid ESXi License for vSphere API Access - -The free ESXi license does not support API access. The vSphere servers must have a valid or evaluation ESXi license. 
- -# Creating Clusters in vSphere with Rancher - -This section describes how to set up vSphere credentials, node templates, and vSphere clusters using the Rancher UI. - -You will need to do the following: - -1. [Create a node template using vSphere credentials](#1-create-a-node-template-using-vsphere-credentials) -2. [Create a Kubernetes cluster using the node template](#2-create-a-kubernetes-cluster-using-the-node-template) -3. [Optional: Provision storage](#3-optional-provision-storage) - - [Enable the vSphere cloud provider for the cluster](#enable-the-vsphere-cloud-provider-for-the-cluster) - -### Configuration References - -For details on configuring the node template, refer to the [node template configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/) - -Rancher uses the RKE library to provision Kubernetes clusters. For details on configuring clusters in vSphere, refer to the [cluster configuration reference in the RKE documentation.]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) - -Note that the vSphere cloud provider must be [enabled](#enable-the-vsphere-cloud-provider-for-the-cluster) to allow dynamic provisioning of volumes. - -# 1. Create a Node Template Using vSphere Credentials - -To create a cluster, you need to create at least one vSphere [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) that specifies how VMs are created in vSphere. - -After you create a node template, it is saved, and you can re-use it whenever you create additional vSphere clusters. - -To create a node template, - -1. Log in with an administrator account to the Rancher UI. - -1. From the user settings menu, select **Node Templates.** - -1. Click **Add Template** and then click on the **vSphere** icon. - -Then, configure your template: - -- [A. 
Configure the vSphere credential](#a-configure-the-vsphere-credential) -- [B. Configure node scheduling](#b-configure-node-scheduling) -- [C. Configure instances and operating systems](#c-configure-instances-and-operating-systems) -- [D. Add networks](#d-add-networks) -- [E. If not already enabled, enable disk UUIDs](#e-if-not-already-enabled-enable-disk-uuids) -- [F. Optional: Configure node tags and custom attributes](#f-optional-configure-node-tags-and-custom-attributes) -- [G. Optional: Configure cloud-init](#g-optional-configure-cloud-init) -- [H. Saving the node template](#h-saving-the-node-template) - -### A. Configure the vSphere Credential - -The steps for configuring your vSphere credentials for the cluster are different depending on your version of Rancher. - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -Your account access information is in a [cloud credential.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) Cloud credentials are stored as Kubernetes secrets. - -You can use an existing cloud credential or create a new one. To create a new cloud credential, - -1. Click **Add New.** -1. In the **Name** field, enter a name for your vSphere credentials. -1. In the **vCenter or ESXi Server** field, enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. -1. Optional: In the **Port** field, configure the port of the vCenter or ESXi server. -1. In the **Username** and **Password** fields, enter your vSphere login username and password. -1. Click **Create.** - -**Result:** The node template has the credentials required to provision nodes in vSphere. - -{{% /tab %}} -{{% tab "Rancher prior to v2.2.0" %}} -In the **Account Access** section, enter the vCenter FQDN or IP address and the credentials for the vSphere user account. 
-{{% /tab %}} -{{% /tabs %}} - -### B. Configure Node Scheduling - -Choose what hypervisor the virtual machine will be scheduled to. The configuration options depend on your version of Rancher. - -{{% tabs %}} -{{% tab "Rancher v2.3.3+" %}} - -The fields in the **Scheduling** section should auto-populate with the data center and other scheduling options that are available to you in vSphere. - -1. In the **Data Center** field, choose the data center where the VM will be scheduled. -1. Optional: Select a **Resource Pool.** Resource pools can be used to partition available CPU and memory resources of a standalone host or cluster, and they can also be nested. -1. If you have a data store cluster, you can toggle the **Data Store** field. This lets you select a data store cluster where your VM will be scheduled to. If the field is not toggled, you can select an individual disk. -1. Optional: Select a folder where the VM will be placed. The VM folders in this dropdown menu directly correspond to your VM folders in vSphere. Note: The folder name should be prefaced with `vm/` in your vSphere config file. -1. Optional: Choose a specific host to create the VM on. Leave this field blank for a standalone ESXi or for a cluster with DRS (Distributed Resource Scheduler). If specified, the host system's pool will be used and the **Resource Pool** parameter will be ignored. -{{% /tab %}} -{{% tab "Rancher prior to v2.3.3" %}} - -In the **Scheduling** section, enter: - -- The name/path of the **Data Center** to create the VMs in -- The name of the **VM Network** to attach to -- The name/path of the **Datastore** to store the disks in - - {{< img "/img/rancher/vsphere-node-template-2.png" "image" >}} - -{{% /tab %}} -{{% /tabs %}} - -### C. Configure Instances and Operating Systems - -Depending on the Rancher version there are different options available to configure instances. 
- -{{% tabs %}} -{{% tab "Rancher v2.3.3+" %}} - -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -In the **Creation method** field, configure the method used to provision VMs in vSphere. Available options include creating VMs that boot from a RancherOS ISO or creating VMs by cloning from an existing virtual machine or [VM template](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-F7BF0E6B-7C4F-4E46-8BBF-76229AEA7220.html). - -The existing VM or template may use any modern Linux operating system that is configured with support for [cloud-init](https://cloudinit.readthedocs.io/en/latest/) using the [NoCloud datasource](https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html). - -Choose the way that the VM will be created: - -- **Deploy from template: Data Center:** Choose a VM template that exists in the data center that you selected. -- **Deploy from template: Content Library:** First, select the [Content Library](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-254B2CE8-20A8-43F0-90E8-3F6776C2C896.html) that contains your template, then select the template from the populated list `Library templates`. -- **Clone an existing virtual machine:** In the **Virtual machine** field, choose an existing VM that the new VM will be cloned from. -- **Install from boot2docker ISO:** Ensure that the `OS ISO URL` field contains the URL of a VMware ISO release for RancherOS (rancheros-vmware.iso). Note that this URL must be accessible from the nodes running your Rancher server installation. - -{{% /tab %}} -{{% tab "Rancher prior to v2.3.3" %}} - -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -Only VMs booting from RancherOS ISO are supported. 
- -Ensure that the [OS ISO URL](#instance-options) contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. - - ![image]({{}}/img/rancher/vsphere-node-template-1.png) - -{{% /tab %}} -{{% /tabs %}} - -### D. Add Networks - -The node template now allows a VM to be provisioned with multiple networks. In the **Networks** field, you can now click **Add Network** to add any networks available to you in vSphere. - -### E. If Not Already Enabled, Enable Disk UUIDs - -In order to provision nodes with RKE, all nodes must be configured with disk UUIDs. - -As of Rancher v2.0.4, disk UUIDs are enabled in vSphere node templates by default. - -If you are using Rancher prior to v2.0.4, refer to these [instructions]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#enabling-disk-uuids-with-a-node-template) for details on how to enable a UUID with a Rancher node template. - -### F. Optional: Configure Node Tags and Custom Attributes - -The way to attach metadata to the VM is different depending on your Rancher version. - -{{% tabs %}} -{{% tab "Rancher v2.3.3+" %}} - -**Optional:** Add vSphere tags and custom attributes. Tags allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -For tags, all your vSphere tags will show up as options to select from in your node template. - -In the custom attributes, Rancher will let you select all the custom attributes you have already set up in vSphere. The custom attributes are keys and you can enter values for each one. - - > **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. 
-
-{{% /tab %}}
-{{% tab "Rancher prior to v2.3.3" %}}
-
-**Optional:**
-
- - Provide a set of configuration parameters (instance-options) for the VMs.
- - Assign labels to the VMs that can be used as a base for scheduling rules in the cluster.
- - Customize the configuration of the Docker daemon on the VMs that will be created.
-
-> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects.
-
-{{% /tab %}}
-{{% /tabs %}}
-
-### G. Optional: Configure cloud-init
-
-[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network.
-
-The scope of cloud-init support for the VMs differs depending on the Rancher version.
-
-{{% tabs %}}
-{{% tab "Rancher v2.3.3+" %}}
-
-To make use of cloud-init initialization, create a cloud config file using valid YAML syntax and paste the file content in the **Cloud Init** field. Refer to the [cloud-init documentation](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) for a commented set of examples of supported cloud config directives.
-
-*Note that cloud-init is not supported when using the ISO creation method.*
-
-{{% /tab %}}
-{{% tab "Rancher prior to v2.3.3" %}}
-
-You may specify the URL of a RancherOS cloud-config.yaml file in the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template.
-
-{{% /tab %}}
-{{% /tabs %}}
-
-### H. 
Saving the Node Template - -Assign a descriptive **Name** for this template and click **Create.** - -### Node Template Configuration Reference - -Refer to [this section]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/) for a reference on the configuration options available for vSphere node templates. - -# 2. Create a Kubernetes Cluster Using the Node Template - -After you've created a template, you can use it to stand up the vSphere cluster itself. - -To install Kubernetes on vSphere nodes, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. This requirement applies to both pre-created [custom nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) and for nodes created in Rancher using the vSphere node driver. - -To create the cluster and enable the vSphere provider for cluster, follow these steps: - -- [A. Set up the cluster name and member roles](#a-set-up-the-cluster-name-and-member-roles) -- [B. Configure Kubernetes options](#b-configure-kubernetes-options) -- [C. Add node pools to the cluster](#c-add-node-pools-to-the-cluster) -- [D. Optional: Add a self-healing node pool](#d-optional-add-a-self-healing-node-pool) -- [E. Create the cluster](#e-create-the-cluster) - -### A. Set up the Cluster Name and Member Roles - -1. Log in to the Rancher UI as an administrator. -2. Navigate to **Clusters** in the **Global** view. -3. Click **Add Cluster** and select the **vSphere** infrastructure provider. -4. Assign a **Cluster Name.** -5. Assign **Member Roles** as required. {{< step_create-cluster_member-roles >}} - -> **Note:** -> -> If you have a cluster with DRS enabled, setting up [VM-VM Affinity Rules](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-7297C302-378F-4AF2-9BD6-6EDB1E0A850A.html) is recommended. 
These rules allow VMs assigned the etcd and control-plane roles to operate on separate ESXi hosts when they are assigned to different node pools. This practice ensures that the failure of a single physical machine does not affect the availability of those planes. - - -### B. Configure Kubernetes Options -{{}} - -### C. Add Node Pools to the Cluster -{{}} - -### D. Optional: Add a Self-Healing Node Pool - -To make a node pool self-healing, enter a number greater than zero in the **Auto Replace** column. Rancher will use the node template for the given node pool to recreate the node if it becomes inactive for that number of minutes. - -> **Note:** Self-healing node pools are designed to help you replace worker nodes for stateless applications. It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. - -### E. Create the Cluster - -Click **Create** to start provisioning the VMs and Kubernetes services. - -{{< result_create-cluster >}} - -# 3. Optional: Provision Storage - -For an example of how to provision storage in vSphere using Rancher, refer to the - [cluster administration section.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere) - - In order to provision storage in vSphere, the vSphere provider must be enabled. - -### Enable the vSphere Cloud Provider for the Cluster - -1. Set **Cloud Provider** option to `Custom`. - - {{< img "/img/rancher/vsphere-node-driver-cloudprovider.png" "vsphere-node-driver-cloudprovider">}} - -1. Click on **Edit as YAML** -1. Insert the following structure to the pre-populated cluster YAML. As of Rancher v2.3+, this structure must be placed under `rancher_kubernetes_engine_config`. In versions prior to v2.3, it has to be defined as a top-level field. 
Note that the `name` *must* be set to `vsphere`. - - ```yaml - rancher_kubernetes_engine_config: # Required as of Rancher v2.3+ - cloud_provider: - name: vsphere - vsphereCloudProvider: - [Insert provider configuration] - ``` - - Rancher uses RKE (the Rancher Kubernetes Engine) to provision Kubernetes clusters. Refer to the [vSphere configuration reference in the RKE documentation]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) for details about the properties of the `vsphereCloudProvider` directive. - - -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/creating-credentials/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/creating-credentials/_index.md deleted file mode 100644 index 9c5bc71a0e2..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/creating-credentials/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Creating Credentials in the vSphere Console -weight: 1 ---- - -This section describes how to create a vSphere username and password. You will need to provide these vSphere credentials to Rancher, which allows Rancher to provision resources in vSphere. - -The following table lists the permissions required for the vSphere user account: - -| Privilege Group | Operations | -|:----------------------|:-----------------------------------------------------------------------| -| Datastore | AllocateSpace
Browse
FileManagement (Low level file operations)
UpdateVirtualMachineFiles
UpdateVirtualMachineMetadata | -| Network | Assign | -| Resource | AssignVMToPool | -| Virtual Machine | Config (All)
GuestOperations (All)
Interact (All)
Inventory (All)
Provisioning (All) | - -The following steps create a role with the required privileges and then assign it to a new user in the vSphere console: - -1. From the **vSphere** console, go to the **Administration** page. - -2. Go to the **Roles** tab. - -3. Create a new role. Give it a name and select the privileges listed in the permissions table above. - - {{< img "/img/rancher/rancherroles1.png" "image" >}} - -4. Go to the **Users and Groups** tab. - -5. Create a new user. Fill out the form and then click **OK**. Make sure to note the username and password, because you will need it when configuring node templates in Rancher. - - {{< img "/img/rancher/rancheruser.png" "image" >}} - -6. Go to the **Global Permissions** tab. - -7. Create a new Global Permission. Add the user you created earlier and assign it the role you created earlier. Click **OK**. - - {{< img "/img/rancher/globalpermissionuser.png" "image" >}} - - {{< img "/img/rancher/globalpermissionrole.png" "image" >}} - -**Result:** You now have credentials that Rancher can use to manipulate vSphere resources. diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/enabling-uuids/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/enabling-uuids/_index.md deleted file mode 100644 index 2388ad4e8ad..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/enabling-uuids/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Enabling Disk UUIDs in Node Templates -weight: 3 ---- - -As of Rancher v2.0.4, disk UUIDs are enabled in vSphere node templates by default. - -For Rancher prior to v2.0.4, we recommend configuring a vSphere node template to automatically enable disk UUIDs because they are required for Rancher to manipulate vSphere resources. 
- -To enable disk UUIDs for all VMs created for a cluster, - -1. Navigate to the **Node Templates** in the Rancher UI while logged in as an administrator. - -2. Add or edit an existing vSphere node template. - -3. Under **Instance Options** click on **Add Parameter**. - -4. Enter `disk.enableUUID` as key with a value of **TRUE**. - - {{< img "/img/rke/vsphere-nodedriver-enable-uuid.png" "vsphere-nodedriver-enable-uuid" >}} - -5. Click **Create** or **Save**. - -**Result:** The disk UUID is enabled in the vSphere node template. diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/node-template-reference/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/node-template-reference/_index.md deleted file mode 100644 index adf7cdbe8d4..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/infrastructure-provider/vsphere/provisioning-vsphere-clusters/node-template-reference/_index.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: vSphere Node Template Configuration Reference -weight: 4 ---- - -The tables below describe the configuration options available in the vSphere node template: - -- [Account access](#account-access) -- [Instance options](#instance-options) -- [Scheduling options](#scheduling-options) - -# Account Access - -The account access parameters are different based on the Rancher version. 
- -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -| Parameter | Required | Description | -|:----------------------|:--------:|:-----| -| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) | - -{{% /tab %}} -{{% tab "Rancher prior to v2.2.0" %}} - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| vCenter or ESXi Server | * | IP or FQDN of the vCenter or ESXi server used for managing VMs. | -| Port | * | Port to use when connecting to the server. Defaults to `443`. | -| Username | * | vCenter/ESXi user to authenticate with the server. | -| Password | * | User's password. | - -{{% /tab %}} -{{% /tabs %}} - -# Instance Options - -The options for creating and configuring an instance are different depending on your Rancher version. - -{{% tabs %}} -{{% tab "Rancher v2.3.3+" %}} - -| Parameter | Required | Description | -|:----------------|:--------:|:-----------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Creation method | * | The method for setting up an operating system on the node. The operating system can be installed from an ISO or from a VM template. Depending on the creation method, you will also have to specify a VM template, content library, existing VM, or ISO. For more information on creation methods, refer to the section on [configuring instances.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#c-configure-instances-and-operating-systems) | -| Cloud Init | | URL of a `cloud-config.yml` file or URL to provision VMs with. This file allows further customization of the operating system, such as network configuration, DNS servers, or system daemons. 
The operating system must support `cloud-init`. | -| Networks | | Name(s) of the network to attach the VM to. | -| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - -{{% /tab %}} -{{% tab "Rancher prior to v2.3.3" %}} - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| -| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - -{{% /tab %}} -{{% /tabs %}} - -# Scheduling Options -The options for scheduling VMs to a hypervisor are different depending on your Rancher version. 
-{{% tabs %}} -{{% tab "Rancher v2.3.3+" %}} - -| Parameter | Required | Description | -|:------------------------|:--------:|:-------| -| Data Center | * | Name/path of the datacenter to create VMs in. | -| Resource Pool | | Name of the resource pool to schedule the VMs in. Leave blank for standalone ESXi. If not specified, the default resource pool is used. | -| Data Store | * | If you have a data store cluster, you can toggle the **Data Store** field. This lets you select a data store cluster where your VM will be scheduled to. If the field is not toggled, you can select an individual disk. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | -| Host | | The IP of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | - -{{% /tab %}} -{{% tab "Rancher prior to v2.3.3" %}} - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| Data Center | * | Name/path of the datacenter to create VMs in. | -| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | -| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | -| Network | * | Name of the VM network to attach VMs to. | -| Data Store | * | Datastore to store the VM disks. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. 
|
-{{% /tab %}}
-{{% /tabs %}}
\ No newline at end of file
diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/node-requirements/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/node-requirements/_index.md
deleted file mode 100644
index 36fca204d9f..00000000000
--- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/node-requirements/_index.md
+++ /dev/null
@@ -1,184 +0,0 @@
----
-title: Node Requirements for Rancher Managed Clusters
-weight: 1
----
-
-This page describes the requirements for the nodes where your apps and services will be installed.
-
-In this section, "user cluster" refers to a cluster running your apps, which should be separate from the cluster (or single node) running Rancher.
-
-> If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server cluster and user clusters have different requirements. For Rancher installation requirements, refer to the node requirements in the [installation section.]({{}}/rancher/v2.x/en/installation/requirements/)
-
-Make sure the nodes for the Rancher server fulfill the following requirements:
-
-- [Operating Systems and Docker Requirements](#operating-systems-and-docker-requirements)
-- [Hardware Requirements](#hardware-requirements)
-- [Networking Requirements](#networking-requirements)
-- [Optional: Security Considerations](#optional-security-considerations)
-
-# Operating Systems and Docker Requirements
-
-Rancher should work with any modern Linux distribution and any modern Docker version. Linux is required for the etcd and controlplane nodes of all downstream clusters. Worker nodes may run Linux or [Windows Server.](#requirements-for-windows-nodes) The capability to use Windows worker nodes in downstream clusters was added in Rancher v2.3.0.
-
-Rancher has been tested and is supported with downstream clusters running Ubuntu, CentOS, Oracle Linux, RancherOS, and RedHat Enterprise Linux. For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/)
-
-All supported operating systems are 64-bit x86.
-
-If you plan to use ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.x/en/installation/options/arm64-platform/)
-
-For information on how to install Docker, refer to the official [Docker documentation.](https://docs.docker.com/)
-
-Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. This [how-to guide]({{}}/rancher/v2.x/en/installation/options/firewall) shows how to check the default firewall rules and how to open the ports with `firewalld` if necessary.
-
-SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps](#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster.
-
-### Requirements for Windows Nodes
-
-_Windows worker nodes can be used as of Rancher v2.3.0_
-
-Nodes with Windows Server must run Docker Enterprise Edition.
-
-Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/)
-
-# Hardware Requirements
-
-The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory.
-
-Regarding CPU and memory, it is recommended that the different planes of Kubernetes clusters (etcd, controlplane, and workers) be hosted on different nodes so that they can scale separately from each other. 
- -For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/) - -For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/) - -# Networking Requirements - -For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. - -The ports required to be open are different depending on how the user cluster is launched. Each of the sections below list the ports that need to be opened for different [cluster creation options]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-options). - -For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.]({{}}/rke/latest/en/os/#ports) - -Details on which ports are used in each situation are found in the following sections: - -- [Commonly used ports](#commonly-used-ports) -- [Port requirements for custom clusters](#port-requirements-for-custom-clusters) -- [Port requirements for clusters hosted by an infrastructure provider](#port-requirements-for-clusters-hosted-by-an-infrastructure-provider) - - [Security group for nodes on AWS EC2](#security-group-for-nodes-on-aws-ec2) -- [Port requirements for clusters hosted by a Kubernetes provider](#port-requirements-for-clusters-hosted-by-a-kubernetes-provider) -- [Port requirements for imported clusters](#port-requirements-for-imported-clusters) -- [Port requirements for local traffic](#port-requirements-for-local-traffic) - -### Commonly Used Ports - -If security isn't a large concern and you're okay with opening a few additional ports, you can use this table as your port reference instead of the comprehensive tables in the following sections. 
- -These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. - -
Commonly Used Ports Reference
- -| Protocol | Port | Description | -|:--------: |:----------------: |------------------------------------------------- | -| TCP | 22 | Node driver SSH provisioning | -| TCP | 2376 | Node driver Docker daemon TLS port | -| TCP | 2379 | etcd client requests | -| TCP | 2380 | etcd peer communication | -| UDP | 8472 | Canal/Flannel VXLAN overlay networking | -| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | -| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | -| TCP | 6783 | Weave Port | -| UDP | 6783-6784 | Weave UDP Ports | -| TCP | 10250 | kubelet API | -| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | -| TCP/UDP | 30000-
32767 | NodePort port range | - -### Port Requirements for Custom Clusters - -If you are launching a Kubernetes cluster on your existing infrastructure, refer to these port requirements. - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with [custom nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/). - -{{< ports-custom-nodes >}} - -### Port Requirements for Clusters Hosted by an Infrastructure Provider - -If you are launching a Kubernetes cluster on nodes that are in an infrastructure provider such as Amazon EC2, Google Container Engine, DigitalOcean, Azure, or vSphere, these port requirements apply. - -These required ports are automatically opened by Rancher during creation of clusters using cloud providers. - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). - ->**Note:** ->The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. - -{{< ports-iaas-nodes >}} - -#### Security Group for Nodes on AWS EC2 - -When using the [AWS EC2 node driver]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. 
- -| Type | Protocol | Port Range | Source/Destination | Rule Type | -|-----------------|:--------:|:-----------:|------------------------|:---------:| -| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | -| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | -| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | -| All traffic | All | All | 0.0.0.0/0 | Outbound | - -### Port Requirements for Clusters Hosted by a Kubernetes Provider - -If you are launching a cluster with a [hosted Kubernetes provider]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters). such as Google Kubernetes Engine, Amazon EKS, or Azure Kubernetes Service, refer to these port requirements. - -{{< ports-imported-hosted >}} - - -### Port Requirements for Imported Clusters - -The following table depicts the port requirements for [imported clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/). - -{{< ports-imported-hosted >}} - - -### Port Requirements for Local Traffic - -Ports marked as `local traffic` (i.e., `9099 TCP`) in the port requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). -These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. - -However, this traffic may be blocked when: - -- You have applied strict host firewall policies on the node. 
-- You are using nodes that have multiple interfaces (multihomed). - -In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes/instances. - -# Optional: Security Considerations - -If you want to provision a Kubernetes cluster that is compliant with the CIS (Center for Internet Security) Kubernetes Benchmark, we recommend to following our hardening guide to configure your nodes before installing Kubernetes. - -For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.]({{}}/rancher/v2.x/en/security/#rancher-hardening-guide) - -# Opening SUSE Linux Ports - -SUSE Linux may have a firewall that blocks all ports by default. To open the ports needed for adding the host to a custom cluster, - -1. SSH into the instance. -1. Edit /`etc/sysconfig/SuSEfirewall2` and open the required ports. In this example, ports 9796 and 10250 are also opened for monitoring: - ``` - FW_SERVICES_EXT_TCP="22 80 443 2376 2379 2380 6443 9099 9796 10250 10254 30000:32767" - FW_SERVICES_EXT_UDP="8472 30000:32767" - FW_ROUTE=yes - ``` -1. Restart the firewall with the new ports: - ``` - SuSEfirewall2 - ``` - -**Result:** The node has the open ports required to be added to a custom cluster. 
- diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/rancher-agents/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/rancher-agents/_index.md deleted file mode 100644 index 9fdc10e9ff6..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/rancher-agents/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Rancher Agents -weight: 6 ---- - -There are two different agent resources deployed on Rancher managed clusters: - -- [cattle-cluster-agent](#cattle-cluster-agent) -- [cattle-node-agent](#cattle-node-agent) - -For a conceptual overview of how the Rancher server provisions clusters and communicates with them, refer to the [architecture]({{}}/rancher/v2.x/en/overview/architecture/) - -### cattle-cluster-agent - -The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource. - -### cattle-node-agent - -The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters when `cattle-cluster-agent` is unavailable. - -> **Note:** In Rancher v2.2.4 and lower, the `cattle-node-agent` pods did not tolerate all taints, causing Kubernetes upgrades to fail on these nodes. The fix for this has been included in Rancher v2.2.5 and higher. 
- -### Scheduling rules - -_Applies to v2.3.0 and higher_ - -| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | -| ---------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | -| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | -| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | - -The `cattle-cluster-agent` Deployment has preferred scheduling rules using `requiredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. - -The `requiredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: - -| Weight | Expression | -| ------ | ------------------------------------------------ | -| 100 | `node-role.kubernetes.io/controlplane:In:"true"` | -| 1 | `node-role.kubernetes.io/etcd:In:"true"` | diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/_index.md deleted file mode 100644 index 434407c8b9b..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/_index.md +++ /dev/null @@ -1,282 +0,0 @@ ---- -title: Launching Kubernetes on Windows Clusters -weight: 4 ---- - -When provisioning a [custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes custom cluster on your existing infrastructure. - -You can use a mix of Linux and Windows hosts as your cluster nodes. 
Windows nodes can only be used for deploying workloads, while Linux nodes are required for cluster management. - -You can only add Windows nodes to a cluster if Windows support is enabled. Windows support can be enabled for new custom clusters that use Kubernetes 1.15+ and the Flannel network provider. Windows support cannot be enabled for existing clusters. - -> Windows clusters have more requirements than Linux clusters. For example, Windows nodes must have 50 GB of disk space. Make sure your Windows cluster fulfills all of the [requirements.](#requirements-for-windows-clusters) - -For a summary of Kubernetes features supported in Windows, see the Kubernetes documentation on [supported functionality and limitations for using Kubernetes with Windows](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations) or the [guide for scheduling Windows containers in Kubernetes](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/). - -This guide covers the following topics: - - - -- [Requirements](#requirements-for-windows-clusters) - - [OS and Docker](#os-and-docker-requirements) - - [Nodes](#node-requirements) - - [Networking](#networking-requirements) - - [Architecture](#architecture-requirements) - - [Containers](#container-requirements) - - [Cloud Providers](#cloud-providers) -- [Tutorial: How to Create a Cluster with Windows Support](#tutorial-how-to-create-a-cluster-with-windows-support) -- [Configuration for Storage Classes in Azure](#configuration-for-storage-classes-in-azure) - - -# Requirements for Windows Clusters - -For a custom cluster, the general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation]({{}}/rancher/v2.x/en/installation/requirements/). 
- -### OS and Docker Requirements - -In order to add Windows worker nodes to a cluster, the node must be running one of the following Windows Server versions and the corresponding version of Docker Engine - Enterprise Edition (EE): - -- Nodes with Windows Server core version 1809 should use Docker EE-basic 18.09 or Docker EE-basic 19.03. -- Nodes with Windows Server core version 1903 should use Docker EE-basic 19.03. - -> **Notes:** -> -> - If you are using AWS, Rancher recommends _Microsoft Windows Server 2019 Base with Containers_ as the Amazon Machine Image (AMI). -> - If you are using GCE, Rancher recommends _Windows Server 2019 Datacenter for Containers_ as the OS image. - -### Node Requirements - -The hosts in the cluster need to have at least: - -- 2 core CPUs -- 5 GB memory -- 50 GB disk space - -Rancher will not provision the node if the node does not meet these requirements. - -### Networking Requirements - -Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation]({{}}/rancher/v2.x/en/installation/) before proceeding with this guide. - -Rancher only supports Windows using Flannel as the network provider. - -There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. - -For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. 
For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. - -For **VXLAN (Overlay)** networking, the [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix must be installed. Most cloud-hosted VMs already have this hotfix. - -### Architecture Requirements - -The Kubernetes cluster management nodes (`etcd` and `controlplane`) must be run on Linux nodes. - -The `worker` nodes, which is where your workloads will be deployed on, will typically be Windows nodes, but there must be at least one `worker` node that is run on Linux in order to run the Rancher cluster agent, DNS, metrics server, and Ingress related containers. 
- -We recommend the minimum three-node architecture listed in the table below, but you can always add additional Linux and Windows workers to scale up your cluster for redundancy: - - - -| Node | Operating System | Kubernetes Cluster Role(s) | Purpose | -| ------ | --------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| Node 1 | Linux (Ubuntu Server 18.04 recommended) | [Control Plane]({{}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{}}/rancher/v2.x/en/cluster-provisioning/#etcd-nodes), [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Manage the Kubernetes cluster | -| Node 2 | Linux (Ubuntu Server 18.04 recommended) | [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster | -| Node 3 | Windows (Windows Server core version 1809 or above) | [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Run your Windows containers | - -### Container Requirements - -Windows requires that containers must be built on the same Windows Server version that they are being deployed on. Therefore, containers must be built on Windows Server core version 1809 or above. If you have existing containers built for an earlier Windows Server core version, they must be re-built on Windows Server core version 1809 or above. - -### Cloud Providers - -If you set a Kubernetes cloud provider in your cluster, some additional steps are required. 
You might want to set a cloud provider if you want to leverage a cloud provider's capabilities,
- -Your hosts can be: - -- Cloud-hosted VMs -- VMs from virtualization clusters -- Bare-metal servers - -You will provision three nodes: - -- One Linux node, which manages the Kubernetes control plane and stores your `etcd` -- A second Linux node, which will be another worker node -- The Windows node, which will run your Windows containers as a worker node - -| Node | Operating System | -| ------ | ------------------------------------------------------------ | -| Node 1 | Linux (Ubuntu Server 18.04 recommended) | -| Node 2 | Linux (Ubuntu Server 18.04 recommended) | -| Node 3 | Windows (Windows Server core version 1809 or above required) | - -If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) - -# 2. Create the Custom Cluster - -The instructions for creating a custom cluster that supports Windows nodes are very similar to the general [instructions for creating a custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#2-create-the-custom-cluster) with some Windows-specific requirements. - -Windows support only be enabled if the cluster uses Kubernetes v1.15+ and the Flannel network provider. - -1. From the **Global** view, click on the **Clusters** tab and click **Add Cluster**. - -1. Click **From existing nodes (Custom)**. - -1. Enter a name for your cluster in the **Cluster Name** text box. - -1. In the **Kubernetes Version** dropdown menu, select v1.15 or above. - -1. In the **Network Provider** field, select **Flannel.** - -1. In the **Windows Support** section, click **Enable.** - -1. Optional: After you enable Windows support, you will be able to choose the Flannel backend. 
There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. - -1. Click **Next**. - -> **Important:** For Host Gateway (L2bridge) networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. - -# 3. Add Nodes to the Cluster - -This section describes how to register your Linux and Worker nodes to your custom cluster. - -### Add Linux Master Node - -The first node in your cluster should be a Linux host has both the **Control Plane** and **etcd** roles. At a minimum, both of these roles must be enabled for this node, and this node must be added to your cluster before you can add Windows hosts. - -In this section, we fill out a form on the Rancher UI to get a custom command to install the Rancher agent on the Linux master node. Then we will copy the command and run it on our Linux master node to register the node in the cluster. - -1. In the **Node Operating System** section, click **Linux**. - -1. In the **Node Role** section, choose at least **etcd** and **Control Plane**. We recommend selecting all three. - -1. 
Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent]({{}}/rancher/v2.x/en/admin-settings/agent-options/) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) - -1. Copy the command displayed on the screen to your clipboard. - -1. SSH into your Linux host and run the command that you copied to your clipboard. - -1. When you are finished provisioning your Linux node(s), select **Done**. - -{{< result_create-cluster >}} - -It may take a few minutes for the node to be registered in your cluster. - -### Add Linux Worker Node - -After the initial provisioning of your custom cluster, your cluster only has a single Linux host. Next, we add another Linux `worker` host, which will be used to support _Rancher cluster agent_, _Metrics server_, _DNS_ and _Ingress_ for your cluster. - -1. From the **Global** view, click **Clusters.** - -1. Go to the custom cluster that you created and click **⋮ > Edit.** - -1. Scroll down to **Node Operating System**. Choose **Linux**. - -1. In the **Customize Node Run Command** section, go to the **Node Options** and select the **Worker** role. - -1. Copy the command displayed on screen to your clipboard. - -1. Log in to your Linux host using a remote Terminal connection. Run the command copied to your clipboard. - -1. From **Rancher**, click **Save**. - -**Result:** The **Worker** role is installed on your Linux host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. - -> **Note:** Taints on Linux Worker Nodes -> -> For each Linux worker node added into the cluster, the following taints will be added to Linux worker node. By adding this taint to the Linux worker node, any workloads added to the windows cluster will be automatically scheduled to the Windows worker node. 
If you want to schedule workloads specifically onto the Linux worker node, you will need to add tolerations to those workloads. - -> | Taint Key | Taint Value | Taint Effect | -> | -------------- | ----------- | ------------ | -> | `cattle.io/os` | `linux` | `NoSchedule` | - -### Add a Windows Worker Node - -You can add Windows hosts to a custom cluster by editing the cluster and choosing the **Windows** option. - -1. From the **Global** view, click **Clusters.** - -1. Go to the custom cluster that you created and click **⋮ > Edit.** - -1. Scroll down to **Node Operating System**. Choose **Windows**. Note: You will see that the **worker** role is the only available role. - -1. Copy the command displayed on screen to your clipboard. - -1. Log in to your Windows host using your preferred tool, such as [Microsoft Remote Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients). Run the command copied to your clipboard in the **Command Prompt (CMD)**. - -1. From Rancher, click **Save**. - -1. Optional: Repeat these instructions if you want to add more Windows nodes to your cluster. - -**Result:** The **Worker** role is installed on your Windows host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. You now have a Windows Kubernetes cluster. - -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. 
This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through the Rancher server. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. - -# Configuration for Storage Classes in Azure - -If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a [storage class]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/#adding-storage-classes) for the cluster. - -In order to have the Azure platform create the required storage resources, follow these steps: - -1. [Configure the Azure cloud provider.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/#azure) - -1. Configure `kubectl` to connect to your cluster. - -1. Copy the `ClusterRole` and `ClusterRoleBinding` manifest for the service account: - - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: system:azure-cloud-provider - rules: - - apiGroups: [''] - resources: ['secrets'] - verbs: ['get','create'] - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: system:azure-cloud-provider - roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: system:azure-cloud-provider - subjects: - - kind: ServiceAccount - name: persistent-volume-binder - namespace: kube-system - -1. Create these in your cluster using one of the follow command. 
- - ``` - # kubectl create -f - ``` diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md deleted file mode 100644 index 8cc045883ff..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: v2.1.x and v2.2.x Windows Documentation (Experimental) -weight: 9100 ---- - -_Available from v2.1.0 to v2.1.9 and v2.2.0 to v2.2.3_ - -This section describes how to provision Windows clusters in Rancher v2.1.x and v2.2.x. If you are using Rancher v2.3.0 or later, please refer to the new documentation for [v2.3.0 or later]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/). - -When you create a [custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes), Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes cluster on your existing infrastructure. - -You can provision a custom Windows cluster using Rancher by using a mix of Linux and Windows hosts as your cluster nodes. - ->**Important:** In versions of Rancher prior to v2.3, support for Windows nodes is experimental. Therefore, it is not recommended to use Windows nodes for production environments if you are using Rancher prior to v2.3. - -This guide walks you through create of a custom cluster that includes three nodes: - -- A Linux node, which serves as a Kubernetes control plane node -- Another Linux node, which serves as a Kubernetes worker used to support Ingress for the cluster -- A Windows node, which is assigned the Kubernetes worker role and runs your Windows containers - -For a summary of Kubernetes features supported in Windows, see [Using Windows in Kubernetes](https://kubernetes.io/docs/setup/windows/intro-windows-in-kubernetes/). 
- -## OS and Container Requirements - -- For clusters provisioned with Rancher v2.1.x and v2.2.x, containers must run on Windows Server 1809 or above. -- You must build containers on a Windows Server core version 1809 or above to run these containers on the same server version. - -## Objectives for Creating Cluster with Windows Support - -When setting up a custom cluster with support for Windows nodes and containers, complete the series of tasks below. - - - -- [1. Provision Hosts](#1-provision-hosts) -- [2. Cloud-host VM Networking Configuration](#2-cloud-hosted-vm-networking-configuration) -- [3. Create the Custom Cluster](#3-create-the-custom-cluster) -- [4. Add Linux Host for Ingress Support](#4-add-linux-host-for-ingress-support) -- [5. Adding Windows Workers](#5-adding-windows-workers) -- [6. Cloud-host VM Routes Configuration](#6-cloud-hosted-vm-routes-configuration) - - - -## 1. Provision Hosts - -To begin provisioning a custom cluster with Windows support, prepare your host servers. Provision three nodes according to our [requirements]({{}}/rancher/v2.x/en/installation/requirements/)—two Linux, one Windows. Your hosts can be: - -- Cloud-hosted VMs -- VMs from virtualization clusters -- Bare-metal servers - -The table below lists the [Kubernetes roles]({{}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) you'll assign to each host, although you won't enable these roles until further along in the configuration process—we're just informing you of each node's purpose. The first node, a Linux host, is primarily responsible for managing the Kubernetes control plane, although, in this use case, we're installing all three roles on this node. Node 2 is also a Linux worker, which is responsible for Ingress support. Finally, the third node is your Windows worker, which will run your Windows applications. 
- -Node | Operating System | Future Cluster Role(s) ---------|------------------|------ -Node 1 | Linux (Ubuntu Server 16.04 recommended) | [Control Plane]({{}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{}}/rancher/v2.x/en/cluster-provisioning/#etcd), [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) -Node 2 | Linux (Ubuntu Server 16.04 recommended) | [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) (This node is used for Ingress support) -Node 3 | Windows (Windows Server core version 1809 or above) | [Worker]({{}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) - -### Requirements - -- You can view node requirements for Linux and Windows nodes in the [installation section]({{}}/rancher/v2.x/en/installation/requirements/). -- All nodes in a virtualization cluster or a bare metal cluster must be connected using a layer 2 network. -- To support [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), your cluster must include at least one Linux node dedicated to the worker role. -- Although we recommend the three node architecture listed in the table above, you can add additional Linux and Windows workers to scale up your cluster for redundancy. - - -## 2. Cloud-hosted VM Networking Configuration - ->**Note:** This step only applies to nodes hosted on cloud-hosted virtual machines. If you're using virtualization clusters or bare-metal servers, skip ahead to [Create the Custom Cluster](#3-create-the-custom-cluster). - -If you're hosting your nodes on any of the cloud services listed below, you must disable the private IP address checks for both your Linux or Windows hosts on startup. To disable this check for each node, follow the directions provided by each service below. 
- -Service | Directions to disable private IP address checks ---------|------------------------------------------------ -Amazon EC2 | [Disabling Source/Destination Checks](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) -Google GCE | [Enabling IP Forwarding for Instances](https://cloud.google.com/vpc/docs/using-routes#canipforward) -Azure VM | [Enable or Disable IP Forwarding](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding) - -## 3. Create the Custom Cluster - -To create a custom cluster that supports Windows nodes, follow the instructions in [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#2-create-the-custom-cluster), starting from [2. Create the Custom Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#2-create-the-custom-cluster). While completing the linked instructions, look for steps that requires special actions for Windows nodes, which are flagged with a note. These notes will link back here, to the special Windows instructions listed in the subheadings below. - - -### Enable the Windows Support Option - -While choosing **Cluster Options**, set **Windows Support (Experimental)** to **Enabled**. - -After you select this option, resume [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#create-the-custom-cluster) from [step 6]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#step-6). - -### Networking Option - -When choosing a network provider for a cluster that supports Windows, the only option available is Flannel, as [host-gw](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) is needed for IP routing. 
- -If your nodes are hosted by a cloud provider and you want automation support such as load balancers or persistent storage devices, see [Selecting Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) for configuration info. - -### Node Configuration - -The first node in your cluster should be a Linux host that fills the Control Plane role. This role must be fulfilled before you can add Windows hosts to your cluster. At minimum, the node must have this role enabled, but we recommend enabling all three. The following table lists our recommended settings (we'll provide the recommended settings for nodes 2 and 3 later). - -Option | Setting --------|-------- -Node Operating System | Linux -Node Roles | etcd
Control Plane
Worker - -When you're done with these configurations, resume [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#create-the-custom-cluster) from [step 8]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#step-8). - - - -## 4. Add Linux Host for Ingress Support - -After the initial provisioning of your custom cluster, your cluster only has a single Linux host. Add another Linux host, which will be used to support Ingress for your cluster. - -1. Using the context menu, open the custom cluster you created in [2. Create the Custom Cluster](#2-create-the-custom-cluster). - -1. From the main menu, select **Nodes**. - -1. Click **Edit Cluster**. - -1. Scroll down to **Node Operating System**. Choose **Linux**. - -1. Select the **Worker** role. - -1. Copy the command displayed on screen to your clipboard. - -1. Log in to your Linux host using a remote Terminal connection. Run the command copied to your clipboard. - -1. From **Rancher**, click **Save**. - -**Result:** The worker role is installed on your Linux host, and the node registers with Rancher. - -## 5. Adding Windows Workers - -You can add Windows hosts to a custom cluster by editing the cluster and choosing the **Windows** option. - -1. From the main menu, select **Nodes**. - -1. Click **Edit Cluster**. - -1. Scroll down to **Node Operating System**. Choose **Windows**. - -1. Select the **Worker** role. - -1. Copy the command displayed on screen to your clipboard. - -1. Log in to your Windows host using your preferred tool, such as [Microsoft Remote Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients). Run the command copied to your clipboard in the **Command Prompt (CMD)**. - -1. From Rancher, click **Save**. - -1. **Optional:** Repeat these instructions if you want to add more Windows nodes to your cluster. 
- -**Result:** The worker role is installed on your Windows host, and the node registers with Rancher. - -## 6. Cloud-hosted VM Routes Configuration - -In Windows clusters, containers communicate with each other using the `host-gw` mode of Flannel. In `host-gw` mode, all containers on the same node belong to a private subnet, and traffic routes from a subnet on one node to a subnet on another node through the host network. - -- When worker nodes are provisioned on AWS, virtualization clusters, or bare metal servers, make sure they belong to the same layer 2 subnet. If the nodes don't belong to the same layer 2 subnet, `host-gw` networking will not work. - -- When worker nodes are provisioned on GCE or Azure, they are not on the same layer 2 subnet. Nodes on GCE and Azure belong to a routable layer 3 network. Follow the instructions below to configure GCE and Azure so that the cloud network knows how to route the host subnets on each node. - -To configure host subnet routing on GCE or Azure, first run the following command to find out the host subnets on each worker node: - -```bash -kubectl get nodes -o custom-columns=nodeName:.metadata.name,nodeIP:status.addresses[0].address,routeDestination:.spec.podCIDR -``` - -Then follow the instructions for each cloud provider to configure routing rules for each node: - -Service | Instructions ---------|------------- -Google GCE | For GCE, add a static route for each node: [Adding a Static Route](https://cloud.google.com/vpc/docs/using-routes#addingroute). -Azure VM | For Azure, create a routing table: [Custom Routes: User-defined](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-udr-overview#user-defined). 
- - -` ` diff --git a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/host-gateway-requirements/_index.md b/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/host-gateway-requirements/_index.md deleted file mode 100644 index ee075c394de..00000000000 --- a/content/rancher/v2.5/en/ecm/setting-up-k8s/rke-clusters/windows-clusters/host-gateway-requirements/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Networking Requirements for Host Gateway (L2bridge) -weight: 1000 ---- - -This section describes how to configure custom Windows clusters that are using *Host Gateway (L2bridge)* mode. - -### Disabling Private IP Address Checks - -If you are using *Host Gateway (L2bridge)* mode and hosting your nodes on any of the cloud services listed below, you must disable the private IP address checks for both your Linux or Windows hosts on startup. To disable this check for each node, follow the directions provided by each service below. - -Service | Directions to disable private IP address checks ---------|------------------------------------------------ -Amazon EC2 | [Disabling Source/Destination Checks](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) -Google GCE | [Enabling IP Forwarding for Instances](https://cloud.google.com/vpc/docs/using-routes#canipforward) (By default, a VM cannot forward a packet originated by another VM) -Azure VM | [Enable or Disable IP Forwarding](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding) - -### Cloud-hosted VM Routes Configuration - -If you are using the [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) backend of Flannel, all containers on the same node belong to a private subnet, and traffic routes from a subnet on one node to a subnet on another node through the host network. 
- -- When worker nodes are provisioned on AWS, virtualization clusters, or bare metal servers, make sure they belong to the same layer 2 subnet. If the nodes don't belong to the same layer 2 subnet, `host-gw` networking will not work. - -- When worker nodes are provisioned on GCE or Azure, they are not on the same layer 2 subnet. Nodes on GCE and Azure belong to a routable layer 3 network. Follow the instructions below to configure GCE and Azure so that the cloud network knows how to route the host subnets on each node. - -To configure host subnet routing on GCE or Azure, first run the following command to find out the host subnets on each worker node: - -```bash -kubectl get nodes -o custom-columns=nodeName:.metadata.name,nodeIP:status.addresses[0].address,routeDestination:.spec.podCIDR -``` - -Then follow the instructions for each cloud provider to configure routing rules for each node: - -Service | Instructions ---------|------------- -Google GCE | For GCE, add a static route for each node: [Adding a Static Route](https://cloud.google.com/vpc/docs/using-routes#addingroute). -Azure VM | For Azure, create a routing table: [Custom Routes: User-defined](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-udr-overview#user-defined). diff --git a/content/rancher/v2.5/en/ecm/upgrading-kubernetes/_index.md b/content/rancher/v2.5/en/ecm/upgrading-kubernetes/_index.md deleted file mode 100644 index f75848a71b3..00000000000 --- a/content/rancher/v2.5/en/ecm/upgrading-kubernetes/_index.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: Upgrading and Rolling Back RKE Kubernetes Clusters -weight: 3 ---- - -Following an upgrade to the latest version of Rancher, downstream Kubernetes clusters can be upgraded to use the latest supported version of Kubernetes. - -Rancher calls RKE (Rancher Kubernetes Engine) as a library when provisioning and editing RKE clusters. 
For more information on configuring the upgrade strategy for RKE clusters, refer to the [RKE documentation]({{}}/rke/latest/en/). - -This section covers the following topics: - -- [New Features](#new-features) -- [Tested Kubernetes Versions](#tested-kubernetes-versions) -- [How Upgrades Work](#how-upgrades-work) -- [Recommended Best Practice for Upgrades](#recommended-best-practice-for-upgrades) -- [Upgrading the Kubernetes Version](#upgrading-the-kubernetes-version) -- [Rolling Back](#rolling-back) -- [Configuring the Upgrade Strategy](#configuring-the-upgrade-strategy) - - [Configuring the Maximum Unavailable Worker Nodes in the Rancher UI](#configuring-the-maximum-unavailable-worker-nodes-in-the-rancher-ui) - - [Enabling Draining Nodes During Upgrades from the Rancher UI](#enabling-draining-nodes-during-upgrades-from-the-rancher-ui) - - [Maintaining Availability for Applications During Upgrades](#maintaining-availability-for-applications-during-upgrades) - - [Configuring the Upgrade Strategy in the cluster.yml](#configuring-the-upgrade-strategy-in-the-cluster-yml) -- [Troubleshooting](#troubleshooting) - -# New Features - -As of Rancher v2.3.0, the Kubernetes metadata feature was added, which allows Rancher to ship Kubernetes patch versions without upgrading Rancher. For details, refer to the [section on Kubernetes metadata.]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata) - -As of Rancher v2.4.0, - -- The ability to import K3s Kubernetes clusters into Rancher was added, along with the ability to upgrade Kubernetes when editing those clusters. 
For details, refer to the [section on imported clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters) -- New advanced options are exposed in the Rancher UI for configuring the upgrade strategy of an RKE cluster: **Maximum Worker Nodes Unavailable** and **Drain nodes.** These options leverage the new cluster upgrade process of RKE v1.1.0, in which worker nodes are upgraded in batches, so that applications can remain available during cluster upgrades, under [certain conditions.](#maintaining-availability-for-applications-during-upgrades) - -# Tested Kubernetes Versions - -Before a new version of Rancher is released, it's tested with the latest minor versions of Kubernetes to ensure compatibility. For example, Rancher v2.3.0 was tested with Kubernetes v1.15.4, v1.14.7, and v1.13.11. For details on which versions of Kubernetes were tested on each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.3.0/) - -# How Upgrades Work - -RKE v1.1.0 changed the way that clusters are upgraded. - -In this section of the [RKE documentation,]({{}}/rke/latest/en/upgrades/how-upgrades-work) you'll learn what happens when you edit or upgrade your RKE Kubernetes cluster. - - -# Recommended Best Practice for Upgrades - -{{% tabs %}} -{{% tab "Rancher v2.4+" %}} -When upgrading the Kubernetes version of a cluster, we recommend that you: - -1. Take a snapshot. -1. Initiate a Kubernetes upgrade. -1. If the upgrade fails, revert the cluster to the pre-upgrade Kubernetes version. Before restoring the cluster from the snapshot in the etcd datastore, the cluster should be running the pre-upgrade Kubernetes version. -1. Restore the cluster from the etcd snapshot. - -The restore operation will work on a cluster that is not in a healthy or active state. -{{% /tab %}} -{{% tab "Rancher prior to v2.4" %}} -When upgrading the Kubernetes version of a cluster, we recommend that you: - -1. 
Take a snapshot. -1. Initiate a Kubernetes upgrade. -1. If the upgrade fails, restore the cluster from the etcd snapshot. - -The cluster cannot be downgraded to a previous Kubernetes version. -{{% /tab %}} -{{% /tabs %}} - -# Upgrading the Kubernetes Version - -> **Prerequisites:** -> -> - The options below are available only for [Rancher-launched RKE Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) and [imported K3s Kubernetes clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/#additional-features-for-imported-k3s-clusters) -> - Before upgrading Kubernetes, [back up your cluster.]({{}}/rancher/v2.x/en/backups) - -1. From the **Global** view, find the cluster for which you want to upgrade Kubernetes. Select **⋮ > Edit**. - -1. Expand **Cluster Options**. - -1. From the **Kubernetes Version** drop-down, choose the version of Kubernetes that you want to use for the cluster. - -1. Click **Save**. - -**Result:** Kubernetes begins upgrading for the cluster. - -# Rolling Back - -A cluster can be restored to a backup in which the previous Kubernetes version was used. For more information, refer to the following sections: - -- [Backing up a cluster]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#how-snapshots-work) -- [Restoring a cluster from backup]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/#restoring-a-cluster-from-a-snapshot) - -# Configuring the Upgrade Strategy - -As of RKE v1.1.0, additional upgrade options became available to give you more granular control over the upgrade process. These options can be used to maintain availability of your applications during a cluster upgrade if certain [conditions and requirements]({{}}/rke/latest/en/upgrades/maintaining-availability) are met. - -The upgrade strategy can be configured in the Rancher UI, or by editing the `cluster.yml`. More advanced options are available by editing the `cluster.yml`. 
- -### Configuring the Maximum Unavailable Worker Nodes in the Rancher UI - -From the Rancher UI, the maximum number of unavailable worker nodes can be configured. During a cluster upgrade, worker nodes will be upgraded in batches of this size. - -By default, the maximum number of unavailable worker nodes is defined as 10 percent of all worker nodes. This number can be configured as a percentage or as an integer. When defined as a percentage, the batch size is rounded down to the nearest node, with a minimum of one node. - -To change the default number or percentage of worker nodes, - -1. Go to the cluster view in the Rancher UI. -1. Click **⋮ > Edit.** -1. In the **Advanced Options** section, go to the **Maximum Worker Nodes Unavailable** field. Enter the percentage of worker nodes that can be upgraded in a batch. Optionally, select **Count** from the drop-down menu and enter the maximum unavailable worker nodes as an integer. -1. Click **Save.** - -**Result:** The cluster is updated to use the new upgrade strategy. - -### Enabling Draining Nodes During Upgrades from the Rancher UI - -By default, RKE [cordons](https://kubernetes.io/docs/concepts/architecture/nodes/#manual-node-administration) each node before upgrading it. [Draining](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) is disabled during upgrades by default. If draining is enabled in the cluster configuration, RKE will both cordon and drain the node before it is upgraded. - -To enable draining each node during a cluster upgrade, - -1. Go to the cluster view in the Rancher UI. -1. Click **⋮ > Edit.** -1. In the **Advanced Options** section, go to the **Drain nodes** field and click **Yes.** -1. Choose a safe or aggressive drain option. For more information about each option, refer to [this section.]({{}}/rancher/v2.x/en/cluster-admin/nodes/#aggressive-and-safe-draining-options) -1. Optionally, configure a grace period. 
The grace period is the timeout given to each pod for cleaning things up, so they will have a chance to exit gracefully. Pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If this value is negative, the default value specified in the pod will be used. -1. Optionally, configure a timeout, which is the amount of time the drain should continue to wait before giving up. -1. Click **Save.** - -**Result:** The cluster is updated to use the new upgrade strategy. - -> **Note:** As of Rancher v2.4.0, there is a [known issue](https://github.com/rancher/rancher/issues/25478) in which the Rancher UI doesn't show the state of etcd and controlplane as drained, even though they are being drained. - -### Maintaining Availability for Applications During Upgrades - -_Available as of RKE v1.1.0_ - -In [this section of the RKE documentation,]({{}}/rke/latest/en/upgrades/maintaining-availability/) you'll learn the requirements to prevent downtime for your applications when upgrading the cluster. - -### Configuring the Upgrade Strategy in the cluster.yml - -More advanced upgrade strategy configuration options are available by editing the `cluster.yml`. - -For details, refer to [Configuring the Upgrade Strategy]({{}}/rke/latest/en/upgrades/configuring-strategy) in the RKE documentation. The section also includes an example `cluster.yml` for configuring the upgrade strategy. - -# Troubleshooting - -If a node doesn't come up after an upgrade, the `rke up` command errors out. - -No upgrade will proceed if the number of unavailable nodes exceeds the configured maximum. - -If an upgrade stops, you may need to fix an unavailable node or remove it from the cluster before the upgrade can continue. 
- -A failed node could be in many different states: - -- Powered off -- Unavailable -- User drains a node while upgrade is in process, so there are no kubelets on the node -- The upgrade itself failed - -If the max unavailable number of nodes is reached during an upgrade, Rancher user clusters will be stuck in updating state and not move forward with upgrading any other control plane nodes. It will continue to evaluate the set of unavailable nodes in case one of the nodes becomes available. If the node cannot be fixed, you must remove the node in order to continue the upgrade. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/upgrading-mcm/_index.md b/content/rancher/v2.5/en/ecm/upgrading-mcm/_index.md deleted file mode 100644 index e8978ea379b..00000000000 --- a/content/rancher/v2.5/en/ecm/upgrading-mcm/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Upgrading the Enterprise Cluster Manager -weight: 2 ---- - -> This page is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/ecm/upgrading-mcm/rollbacks/_index.md b/content/rancher/v2.5/en/ecm/upgrading-mcm/rollbacks/_index.md deleted file mode 100644 index 7ebe57e5c73..00000000000 --- a/content/rancher/v2.5/en/ecm/upgrading-mcm/rollbacks/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Rolling Back the Enterprise Cluster Manager -weight: 1 ---- - -> This page is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/faq/_index.md b/content/rancher/v2.5/en/faq/_index.md deleted file mode 100644 index 26fd9dbbeb7..00000000000 --- a/content/rancher/v2.5/en/faq/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: FAQ -weight: 20 ---- - -This FAQ is a work in progress designed to answer the questions our users most frequently ask about Rancher v2.x. - -See [Technical FAQ]({{}}/rancher/v2.x/en/faq/technical/), for frequently asked technical questions. - -
- -**Does Rancher v2.x support Docker Swarm and Mesos as environment types?** - -When creating an environment in Rancher v2.x, Swarm and Mesos will no longer be standard options you can select. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 or so are running Swarm. - -
- -**Is it possible to manage Azure Kubernetes Services with Rancher v2.x?** - -Yes. - -
- -**Does Rancher support Windows?** - -As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) - -
- -**Does Rancher support Istio?** - -As of Rancher 2.3.0, we support [Istio.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/) - -Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF compliant Kubernetes cluster. You can read more about it [here](https://rio.io/) - -
- -**Will Rancher v2.x support Hashicorp's Vault for storing secrets?** - -Secrets management is on our roadmap but we haven't assigned it to a specific release yet. - -
- -**Does Rancher v2.x support RKT containers as well?** - -At this time, we only support Docker. - -
- -**Does Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and imported Kubernetes?** - -Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave (Weave is available as of v2.2.0). Always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) for details about what is officially supported. - -
- -**Are you planning on supporting Traefik for existing setups?** - -We don't currently plan on providing embedded Traefik support, but we're still exploring load-balancing approaches. - -
- -**Can I import OpenShift Kubernetes clusters into v2.x?** - -Our goal is to run any upstream Kubernetes clusters. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. - -
- -**Are you going to integrate Longhorn?** - -Yes. Longhorn was on a bit of a hiatus while we were working on v2.0. We plan to re-engage on the project. \ No newline at end of file diff --git a/content/rancher/v2.5/en/faq/kubectl/_index.md b/content/rancher/v2.5/en/faq/kubectl/_index.md deleted file mode 100644 index b4172ab0a40..00000000000 --- a/content/rancher/v2.5/en/faq/kubectl/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Installing and Configuring kubectl -weight: 100 ---- - -`kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. - -### Installation - -See [kubectl Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation on your operating system. - -### Configuration - -When you create a Kubernetes cluster with RKE, RKE creates a `kube_config_rancher-cluster.yml` in the local directory that contains credentials to connect to your new cluster with tools like `kubectl` or `helm`. - -You can copy this file to `$HOME/.kube/config` or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_rancher-cluster.yml`. - -``` -export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml -``` - -Test your connectivity with `kubectl` and see if you can get the list of nodes back. 
- -``` -kubectl get nodes - NAME STATUS ROLES AGE VERSION -165.227.114.63 Ready controlplane,etcd,worker 11m v1.10.1 -165.227.116.167 Ready controlplane,etcd,worker 11m v1.10.1 -165.227.127.226 Ready controlplane,etcd,worker 11m v1.10.1 -``` diff --git a/content/rancher/v2.5/en/faq/mcm/networking/_index.md b/content/rancher/v2.5/en/faq/mcm/networking/_index.md deleted file mode 100644 index 863ad97169d..00000000000 --- a/content/rancher/v2.5/en/faq/mcm/networking/_index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Networking -weight: 8005 ---- - -Networking FAQ's - -- [CNI Providers]({{}}/rancher/v2.x/en/faq/networking/cni-providers/) - diff --git a/content/rancher/v2.5/en/faq/mcm/networking/cni-providers/_index.md b/content/rancher/v2.5/en/faq/mcm/networking/cni-providers/_index.md deleted file mode 100644 index 3a0352a90eb..00000000000 --- a/content/rancher/v2.5/en/faq/mcm/networking/cni-providers/_index.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: Container Network Interface (CNI) Providers -description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you -weight: 2300 ---- - -## What is CNI? - -CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. - -Kubernetes uses CNI as an interface between network providers and Kubernetes pod networking. - -![CNI Logo]({{}}/img/rancher/cni-logo.png) - -For more information visit [CNI GitHub project](https://github.com/containernetworking/cni). - -### What Network Models are Used in CNI? 
- -CNI network providers implement their network fabric using either an encapsulated network model such as Virtual Extensible Lan ([VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan)) or an unencapsulated network model such as Border Gateway Protocol ([BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol)). - -#### What is an Encapsulated Network? - -This network model provides a logical Layer 2 (L2) network encapsulated over the existing Layer 3 (L3) network topology that spans the Kubernetes cluster nodes. With this model you have an isolated L2 network for containers without needing routing distribution, all at the cost of minimal overhead in terms of processing and increased IP package size, which comes from an IP header generated by overlay encapsulation. Encapsulation information is distributed by UDP ports between Kubernetes workers, interchanging network control plane information about how MAC addresses can be reached. Common encapsulation used in this kind of network model is VXLAN, Internet Protocol Security (IPSec), and IP-in-IP. - -In simple terms, this network model generates a kind of network bridge extended between Kubernetes workers, where pods are connected. - -This network model is used when an extended L2 bridge is preferred. This network model is sensitive to L3 network latencies of the Kubernetes workers. If datacenters are in distinct geolocations, be sure to have low latencies between them to avoid eventual network segmentation. - -CNI network providers using this network model include Flannel, Canal, and Weave. - -![Encapsulated Network]({{}}/img/rancher/encapsulated-network.png) - -#### What is an Unencapsulated Network? - -This network model provides an L3 network to route packets between containers. This model doesn't generate an isolated l2 network, nor generates overhead. These benefits come at the cost of Kubernetes workers having to manage any route distribution that's needed. 
Instead of using IP headers for encapsulation, this network model uses a network protocol between Kubernetes workers to distribute routing information to reach pods, such as [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). - -In simple terms, this network model generates a kind of network router extended between Kubernetes workers, which provides information about how to reach pods. - -This network model is used when a routed L3 network is preferred. This mode dynamically updates routes at the OS level for Kubernetes workers. It's less sensitive to latency. - -CNI network providers using this network model include Calico and Romana. - -![Unencapsulated Network]({{}}/img/rancher/unencapsulated-network.png) - -### What CNI Providers are Provided by Rancher? - -Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave (Weave is available as of v2.2.0). You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. - -#### Canal - -![Canal Logo]({{}}/img/rancher/canal-logo.png) - -Canal is a CNI network provider that gives you the best of Flannel and Calico. It allows users to easily deploy Calico and Flannel networking together as a unified networking solution, combining Calico’s network policy enforcement with the rich superset of Calico (unencapsulated) and/or Flannel (encapsulated) network connectivity options. - -In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. - -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). 
For details, refer to [the port requirements for user clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) - -{{< img "/img/rancher/canal-diagram.png" "Canal Diagram">}} - -For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) - -#### Flannel - -![Flannel Logo]({{}}/img/rancher/flannel-logo.png) - -Flannel is a simple and easy way to configure L3 network fabric designed for Kubernetes. Flannel runs a single binary agent named flanneld on each host, which is responsible for allocating a subnet lease to each host out of a larger, preconfigured address space. Flannel uses either the Kubernetes API or etcd directly to store the network configuration, the allocated subnets, and any auxiliary data (such as the host's public IP). Packets are forwarded using one of several backend mechanisms, with the default encapsulation being [VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). - -Encapsulated traffic is unencrypted by default. Therefore, flannel provides an experimental backend for encryption, [IPSec](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. - -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [the port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for more details. - -![Flannel Diagram]({{}}/img/rancher/flannel-diagram.png) - -For more information, see the [Flannel GitHub Page](https://github.com/coreos/flannel). - -#### Calico - -![Calico Logo]({{}}/img/rancher/calico-logo.png) - -Calico enables networking and network policy in Kubernetes clusters across the cloud. Calico uses a pure, unencapsulated IP network fabric and policy engine to provide networking for your Kubernetes workloads. 
Workloads are able to communicate over both cloud infrastructure and on-premise using BGP. - -Calico also provides a stateless IP-in-IP encapsulation mode that can be used, if necessary. Calico also offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies. - -Kubernetes workers should open TCP port `179` (BGP). See [the port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for more details. - -![Calico Diagram]({{}}/img/rancher/calico-diagram.svg) - -For more information, see the following pages: - -- [Project Calico Official Site](https://www.projectcalico.org/) -- [Project Calico GitHub Page](https://github.com/projectcalico/calico) - - -#### Weave - -![Weave Logo]({{}}/img/rancher/weave-logo.png) - -Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it supports encrypting traffic between the peers. - -Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for more details. - -For more information, see the following pages: - -- [Weave Net Official Site](https://www.weave.works/) - -### CNI Features by Provider - -The following table summarizes the different features available for each CNI network provider provided by Rancher. 
- -| Provider | Network Model | Route Distribution | Network Policies | Mesh | External Datastore | Encryption | Ingress/Egress Policies | -| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | -| Canal | Encapsulated (VXLAN) | No | Yes | No | K8S API | No | Yes | -| Flannel | Encapsulated (VXLAN) | No | No | No | K8S API | No | No | -| Calico | Encapsulated (VXLAN,IPIP) OR Unencapsulated | Yes | Yes | Yes | Etcd and K8S API | No | Yes | -| Weave | Encapsulated | Yes | Yes | Yes | No | Yes | Yes | - -- Network Model: Encapsulated or unencapsulated. For more information, see [What Network Models are Used in CNI?](#what-network-models-are-used-in-cni) - -- Route Distribution: An exterior gateway protocol designed to exchange routing and reachability information on the Internet. BGP can assist with pod-to-pod networking between clusters. This feature is a must on unencapsulated CNI network providers, and it is typically done by BGP. If you plan to build clusters split across network segments, route distribution is a feature that's nice-to-have. - -- Network Policies: Kubernetes offers functionality to enforce rules about which services can communicate with each other using network policies. This feature is stable as of Kubernetes v1.7 and is ready to use with certain networking plugins. - -- Mesh: This feature allows service-to-service networking communication between distinct Kubernetes clusters. - -- External Datastore: CNI network providers with this feature need an external datastore for its data. - -- Encryption: This feature allows cyphered and secure network control and data planes. - -- Ingress/Egress Policies: This feature allows you to manage routing control for both Kubernetes and non-Kubernetes communications. - -#### CNI Community Popularity - -The following table summarizes different GitHub metrics to give you an idea of each project's popularity and activity. This data was collected in January 2020. 
- -| Provider | Project | Stars | Forks | Contributors | -| ---- | ---- | ---- | ---- | ---- | -| Canal | https://github.com/projectcalico/canal | 614 | 89 | 19 | -| flannel | https://github.com/coreos/flannel | 4977 | 1.4k | 140 | -| Calico | https://github.com/projectcalico/calico | 1534 | 429 | 135 | -| Weave | https://github.com/weaveworks/weave/ | 5737 | 559 | 73 | - -
-### Which CNI Provider Should I Use? - -It depends on your project needs. There are many different providers, which each have various features and options. There isn't one provider that meets everyone's needs. - -As of Rancher v2.0.7, Canal is the default CNI network provider. We recommend it for most use cases. It provides encapsulated networking for containers with Flannel, while adding Calico network policies that can provide project/namespace isolation in terms of networking. - -### How can I configure a CNI network provider? - -Please see [Cluster Options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) and the options for [Network Plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/). diff --git a/content/rancher/v2.5/en/faq/mcm/removing-rancher/_index.md b/content/rancher/v2.5/en/faq/mcm/removing-rancher/_index.md deleted file mode 100644 index 8ea06632696..00000000000 --- a/content/rancher/v2.5/en/faq/mcm/removing-rancher/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Rancher is No Longer Needed -weight: 8010 ---- - -This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. 
- -- [If the Rancher server is deleted, what happens to the workloads in my downstream clusters?](#if-the-rancher-server-is-deleted-what-happens-to-the-workloads-in-my-downstream-clusters) -- [If the Rancher server is deleted, how do I access my downstream clusters?](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) -- [What if I don't want Rancher anymore?](#what-if-i-don-t-want-rancher-anymore) -- [What if I don't want my imported cluster managed by Rancher?](#what-if-i-don-t-want-my-imported-cluster-managed-by-rancher) -- [What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher?](#what-if-i-don-t-want-my-rke-cluster-or-hosted-kubernetes-cluster-managed-by-rancher) - -### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? - -If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. - -### If the Rancher server is deleted, how do I access my downstream clusters? - -The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: - -- **Imported clusters:** The cluster will be unaffected and you can access the cluster using the same methods that you did before the cluster was imported into Rancher. -- **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. -- **RKE clusters:** To access an [RKE cluster,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) the cluster must have the [authorized cluster endpoint]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. 
(The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.]({{}}/rancher/v2.x/en/overview/architecture/#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. - -### What if I don't want Rancher anymore? - -If you [installed Rancher on a Kubernetes cluster,]({{}}/rancher/v2.x/en/installation/k8s-install/) remove Rancher by using the [System Tools]({{}}/rancher/v2.x/en/system-tools/) with the `remove` subcommand. - -If you installed Rancher with Docker, you can uninstall Rancher by removing the single Docker container that it runs in. - -Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) - -### What if I don't want my imported cluster managed by Rancher? - -If an imported cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was imported into Rancher. - -To detach the cluster, - -1. From the **Global** view in Rancher, go to the **Clusters** tab. -2. Go to the imported cluster that should be detached from Rancher and click **⋮ > Delete.** -3. Click **Delete.** - -**Result:** The imported cluster is detached from Rancher and functions normally outside of Rancher. 
- -### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? - -At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. - -The capability to manage these clusters without Rancher is being tracked in this [issue.](https://github.com/rancher/rancher/issues/25234) - -For information about how to access clusters if the Rancher server is deleted, refer to [this section.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) diff --git a/content/rancher/v2.5/en/faq/mcm/technical/_index.md b/content/rancher/v2.5/en/faq/mcm/technical/_index.md deleted file mode 100644 index 1151e35489c..00000000000 --- a/content/rancher/v2.5/en/faq/mcm/technical/_index.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: Technical -weight: 8006 ---- - -### How can I reset the administrator password? - -Docker Install: -``` -$ docker exec -ti reset-password -New password for default administrator (user-xxxxx): - -``` - -Kubernetes install (Helm): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- reset-password -New password for default administrator (user-xxxxx): - -``` - -Kubernetes install (RKE add-on): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- reset-password -New password for default administrator (user-xxxxx): - -``` - - -### I deleted/deactivated the last admin, how can I fix it? 
-Docker Install: -``` -$ docker exec -ti ensure-default-admin -New default administrator (user-xxxxx) -New password for default administrator (user-xxxxx): - -``` - -Kubernetes install (Helm): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin -New password for default administrator (user-xxxxx): - -``` - -Kubernetes install (RKE add-on): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- ensure-default-admin -New password for default admin user (user-xxxxx): - -``` - -### How can I enable debug logging? - -See [Troubleshooting: Logging]({{}}/rancher/v2.x/en/troubleshooting/logging/) - -### My ClusterIP does not respond to ping - -ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. - -### Where can I manage Node Templates? - -Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. - -### Why is my Layer-4 Load Balancer in `Pending` state? - -The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) - -### Where is the state of Rancher stored? 
- -- Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. -- Kubernetes install: in the etcd of the RKE cluster created to run Rancher. - -### How are the supported Docker versions determined? - -We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. - -### How can I access nodes created by Rancher? - -SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. - -![Download Keys]({{}}/img/rancher/downloadsshkeys.png) - -Unzip the downloaded zip file, and use the file `id_rsa` to connect to your host. Be sure to use the correct username (`rancher` or `docker` for RancherOS, `ubuntu` for Ubuntu, `ec2-user` for Amazon Linux) - -``` -$ ssh -i id_rsa user@ip_of_node -``` - -### How can I automate task X in Rancher? - -The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: - -* Visit `https://your_rancher_ip/v3` and browse the API options. -* Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) - -### The IP address of a node changed, how can I recover? - -A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. 
If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. - -When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes]({{}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) to clean the node. - -When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. - -### How can I add additional arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? - -You can add additional arguments/binds/environment variables via the [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables]({{}}/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls]({{}}/rke/latest/en/example-yamls/). - -### How do I check if my certificate chain is valid? - -Use the `openssl verify` command to validate your certificate chain: - ->**Note:** Configure `SSL_CERT_DIR` and `SSL_CERT_FILE` to a dummy location to make sure the OS installed certificates are not used when verifying manually. - -``` -SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem rancher.yourdomain.com.pem -rancher.yourdomain.com.pem: OK -``` - -If you receive the error `unable to get local issuer certificate`, the chain is incomplete. This usually means that there is an intermediate CA certificate that issued your server certificate. 
If you already have this certificate, you can use it in the verification of the certificate like shown below: - -``` -SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem -untrusted intermediate.pem rancher.yourdomain.com.pem -rancher.yourdomain.com.pem: OK -``` - -If you have successfully verified your certificate chain, you should include needed intermediate CA certificates in the server certificate to complete the certificate chain for any connection made to Rancher (for example, by the Rancher agent). The order of the certificates in the server certificate file should be first the server certificate itself (contents of `rancher.yourdomain.com.pem`), followed by intermediate CA certificate(s) (contents of `intermediate.pem`). - -``` ------BEGIN CERTIFICATE----- -%YOUR_CERTIFICATE% ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -%YOUR_INTERMEDIATE_CERTIFICATE% ------END CERTIFICATE----- -``` - -If you still get errors during verification, you can retrieve the subject and the issuer of the server certificate using the following command: - -``` -openssl x509 -noout -subject -issuer -in rancher.yourdomain.com.pem -subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com -issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA -``` - -### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? - -Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. - -Check `Common Name`: - -``` -openssl x509 -noout -subject -in cert.pem -subject= /CN=rancher.my.org -``` - -Check `Subject Alternative Names`: - -``` -openssl x509 -noout -in cert.pem -text | grep DNS - DNS:rancher.my.org -``` - -### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? 
- -This is due to a combination of the following default Kubernetes settings: - -* kubelet - * `node-status-update-frequency`: Specifies how often kubelet posts node status to master (default 10s) -* kube-controller-manager - * `node-monitor-period`: The period for syncing NodeStatus in NodeController (default 5s) - * `node-monitor-grace-period`: Amount of time which we allow running Node to be unresponsive before marking it unhealthy (default 40s) - * `pod-eviction-timeout`: The grace period for deleting pods on failed nodes (default 5m0s) - -See [Kubernetes: kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) and [Kubernetes: kube-controller-manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) for more information on these settings. - -In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. See [Kubernetes: Taint based Evictions](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#taint-based-evictions) for more information. - -* kube-apiserver (Kubernetes v1.13 and up) - * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. - * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. - -### Can I use keyboard shortcuts in the UI? - -Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. 
diff --git a/content/rancher/v2.5/en/faq/security/_index.md b/content/rancher/v2.5/en/faq/security/_index.md deleted file mode 100644 index f9d6ec86452..00000000000 --- a/content/rancher/v2.5/en/faq/security/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Security -weight: 8007 - ---- - -**Is there a Hardening Guide?** - -The Hardening Guide is now located in the main [Security]({{}}/rancher/v2.x/en/security/) section. - -
- -**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** - -We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security]({{}}/rancher/v2.x/en/security/) section. diff --git a/content/rancher/v2.5/en/faq/telemetry/_index.md b/content/rancher/v2.5/en/faq/telemetry/_index.md deleted file mode 100644 index 6ab582667e1..00000000000 --- a/content/rancher/v2.5/en/faq/telemetry/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Telemetry -weight: 8008 ---- - -### What is Telemetry? - -Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. - -### What information is collected? - -No specific identifying information like usernames, passwords, or the names or addresses of user resources will ever be collected. - -The primary things collected include: - - - Aggregate counts (smallest, average, largest, total) of nodes per-cluster and their size (e.g. CPU cores & RAM). - - Aggregate counts of logical resources like Clusters, Projects, Namespaces, and Pods. - - Counts of what driver was used to deploy clusters and nodes (e.g. GKE vs EC2 vs Imported vs Custom). - - Versions of Kubernetes components, Operating Systems and Docker that are deployed on nodes. - - Whether some optional components are enabled or not (e.g. which auth providers are used). - - The image name & version of Rancher that is running. - - A unique randomly-generated identifier for this installation. - -### Can I see the information that is being sent? - -If Telemetry is enabled, you can go to `https:///v1-telemetry` in your installation to see the current data. - -If Telemetry is not enabled, the process that collects the data is not running, so there is nothing being collected to look at. 
- -### How do I turn it on or off? - -After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/_index.md deleted file mode 100644 index 03c104d537c..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Install on a Kubernetes Cluster -description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation -weight: 3 ---- - -> This section is under construction. - -Rancher is both a platform and a Kubernetes distribution. In this section you'll learn how to set up a Rancher Kubernetes cluster that has the Rancher server Helm chart installed. - -There are two main ways that Rancher can be installed: - -1. You can use Helm to install the Rancher Helm chart on any Kubernetes cluster. -2. You can use the Rancher CLI to install a Rancher Kubernetes cluster. This cluster comes with the Rancher Helm chart built in. - -The installation path that you choose will affect the way that you upgrade Rancher, but not the way that Rancher is backed up and restored. - -This section focuses on the installation path in which the Rancher Helm chart is installed on an existing Kubernetes cluster. diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/_index.md deleted file mode 100644 index df088c803ec..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/_index.md +++ /dev/null @@ -1,274 +0,0 @@ ---- -title: Install -weight: 1 ---- - -> **Prerequisite:** -> Set up the Rancher Kubernetes cluster. Rancher can be installed on any Kubernetes cluster. 
-> -> This cluster can use upstream Kubernetes, or it can use one of Rancher's Kubernetes distributions, or it can be a managed Kubernetes cluster from a provider such as Amazon EKS. - -# Install the Rancher Helm Chart - -Rancher is installed using the Helm package manager for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. - -With Helm, we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at https://helm.sh/. - -For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/). - -To choose a Rancher version to install, refer to [Choosing a Rancher Version.]({{}}/rancher/v2.x/en/installation/options/server-tags) - -To choose a version of Helm to install Rancher with, refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) - -> **Note:** The installation instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.x/en/installation/options/helm2) provides a copy of the older installation instructions for Rancher installed on an RKE Kubernetes cluster with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -To set up Rancher, - -1. [Install the required CLI tools](#1-install-the-required-cli-tools) -2. [Add the Helm chart repository](#2-add-the-helm-chart-repository) -3. [Create a namespace for Rancher](#3-create-a-namespace-for-rancher) -4. [Choose your SSL configuration](#4-choose-your-ssl-configuration) -5. [Install cert-manager](#5-install-cert-manager) (unless you are bringing your own certificates, or TLS will be terminated on a load balancer) -6. 
[Install Rancher with Helm and your chosen certificate option](#6-install-rancher-with-helm-and-your-chosen-certificate-option) -7. [Verify that the Rancher server is successfully deployed](#7-verify-that-the-rancher-server-is-successfully-deployed) -8. [Save your options](#8-save-your-options) - -### 1. Install the Required CLI Tools - -The following CLI tools are required for setting up the Kubernetes cluster. Please make sure these tools are installed and available in your `$PATH`. - -Refer to the [instructions provided by the Helm project](https://helm.sh/docs/intro/install/) for your specific platform. - -- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -### 2. Add the Helm Chart Repository - -Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). - -{{< release-channel >}} - -``` -helm repo add rancher- https://releases.rancher.com/server-charts/ -``` - -### 3. Create a Namespace for Rancher - -We'll need to define a Kubernetes namespace where the resources created by the Chart should be installed. This should always be `cattle-system`: - -``` -kubectl create namespace cattle-system -``` - -### 4. Choose your SSL Configuration - -The Rancher management server is designed to be secure by default and requires SSL/TLS configuration. 
- -> **Note:** If you want to terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination). - -There are three recommended options for the source of the certificate used for TLS termination at the Rancher server: - -- **Rancher-generated TLS certificate:** In this case, you will need to install `cert-manager` into the cluster. Rancher utilizes `cert-manager` to issue and maintain its certificates. Rancher will generate a CA certificate of its own, and sign a cert using that CA. `cert-manager` is then responsible for managing that certificate. -- **Let's Encrypt:** The Let's Encrypt option also uses `cert-manager`. However, in this case, cert-manager is combined with a special Issuer for Let's Encrypt that performs all actions (including request and validation) necessary for getting a Let's Encrypt issued cert. This configuration uses HTTP validation (`HTTP-01`), so the load balancer must have a public DNS record and be accessible from the internet. -- **Bring your own certificate:** This option allows you to bring your own public- or private-CA signed certificate. Rancher will use that certificate to secure websocket and HTTPS traffic. In this case, you must upload this certificate (and associated key) as PEM-encoded files with the name `tls.crt` and `tls.key`. If you are using a private CA, you must also upload that certificate. This is due to the fact that this private CA may not be trusted by your nodes. Rancher will take that CA certificate, and generate a checksum from it, which the various Rancher components will use to validate their connection to Rancher. 
- - -| Configuration | Helm Chart Option | Requires cert-manager | -| ------------------------------ | ----------------------- | ------------------------------------- | -| Rancher Generated Certificates (Default) | `ingress.tls.source=rancher` | [yes](#5-install-cert-manager) | -| Let’s Encrypt | `ingress.tls.source=letsEncrypt` | [yes](#5-install-cert-manager) | -| Certificates from Files | `ingress.tls.source=secret` | no | - -### 5. Install cert-manager - -> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination). - -This step is only required to use certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) or to request Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). - -{{% accordion id="cert-manager" label="Click to Expand" %}} - -> **Important:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - -These instructions are adapted from the [official cert-manager documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). - -``` -# Install the CustomResourceDefinition resources separately -kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.15.0/cert-manager.crds.yaml - -# **Important:** -# If you are running Kubernetes v1.15 or below, you -# will need to add the `--validate=false` flag to your -# kubectl apply command, or else you will receive a -# validation error relating to the -# x-kubernetes-preserve-unknown-fields field in -# cert-manager’s CustomResourceDefinition resources. 
-# This is a benign error and occurs due to the way kubectl -# performs resource validation. - -# Create the namespace for cert-manager -kubectl create namespace cert-manager - -# Add the Jetstack Helm repository -helm repo add jetstack https://charts.jetstack.io - -# Update your local Helm chart repository cache -helm repo update - -# Install the cert-manager Helm chart -helm install \ - cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --version v0.15.0 -``` - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-5c6866597-zw7kh 1/1 Running 0 2m -cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m -cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m -``` - -{{% /accordion %}} - -### 6. Install Rancher with Helm and Your Chosen Certificate Option - -The exact command to install Rancher differs depending on the certificate configuration. - -{{% tabs %}} -{{% tab "Rancher-generated Certificates" %}} - - -The default is for Rancher to generate a CA and uses `cert-manager` to issue the certificate for access to the Rancher server interface. - -Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. - -- Set the `hostname` to the DNS name you pointed at your load balancer. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. -- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6` - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... 
-deployment "rancher" successfully rolled out -``` - -{{% /tab %}} -{{% tab "Let's Encrypt" %}} - -This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. - -In the following command, - -- `hostname` is set to the public DNS record, -- `ingress.tls.source` is set to `letsEncrypt` -- `letsEncrypt.email` is set to the email address used for communication about your certificate (for example, expiry notices) -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=letsEncrypt \ - --set letsEncrypt.email=me@example.org -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -{{% /tab %}} -{{% tab "Certificates from Files" %}} -In this option, Kubernetes secrets are created from your own certificates for Rancher to use. - -When you run this command, the `hostname` option must match the `Common Name` or a `Subject Alternative Names` entry in the server certificate or the Ingress controller will fail to configure correctly. - -Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers and applications. - -> If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.x/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) - -- Set the `hostname`. -- Set `ingress.tls.source` to `secret`. 
-- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret -``` - -If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: - -``` -helm install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret \ - --set privateCA=true -``` - -Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.x/en/installation/options/tls-secrets/) to publish the certificate files so Rancher and the Ingress controller can use them. -{{% /tab %}} -{{% /tabs %}} - -The Rancher chart configuration has many options for customizing the installation to suit your specific environment. Here are some common advanced scenarios. - -- [HTTP Proxy]({{}}/rancher/v2.x/en/installation/options/chart-options/#http-proxy) -- [Private Docker Image Registry]({{}}/rancher/v2.x/en/installation/options/chart-options/#private-registry-and-air-gap-installs) -- [TLS Termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination) - -See the [Chart Options]({{}}/rancher/v2.x/en/installation/options/chart-options/) for the full list of options. - - -### 7. Verify that the Rancher Server is Successfully Deployed - -After adding the secrets, check if Rancher was rolled out successfully: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... 
-deployment "rancher" successfully rolled out -``` - -If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: - -``` -kubectl -n cattle-system get deploy rancher -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -rancher 3 3 3 3 3m -``` - -It should show the same count for `DESIRED` and `AVAILABLE`. - -### 8. Save Your Options - -Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. - -### Finishing Up - -That's it. You should have a functional Rancher server. - -In a web browser, go to the DNS name that forwards traffic to your load balancer. Then you should be greeted by the colorful login page. - -Doesn't work? Take a look at the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) Page - - -### Optional Next Steps - -Enable the Enterprise Cluster Manager. \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/chart-options/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/chart-options/_index.md deleted file mode 100644 index 0cb9130abd5..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/chart-options/_index.md +++ /dev/null @@ -1,255 +0,0 @@ ---- -title: Helm Chart Options -weight: 2 ---- - -> This page is under construction. 
- -- [Common Options](#common-options) -- [Advanced Options](#advanced-options) -- [API Audit Log](#api-audit-log) -- [Setting Extra Environment Variables](#setting-extra-environment-variables) -- [TLS Settings](#tls-settings) -- [Import local Cluster](#import-local-cluster) -- [Customizing your Ingress](#customizing-your-ingress) -- [HTTP Proxy](#http-proxy) -- [Additional Trusted CAs](#additional-trusted-cas) -- [Private Registry and Air Gap Installs](#private-registry-and-air-gap-installs) -- [External TLS Termination](#external-tls-termination) - -### Common Options - -| Option | Default Value | Description | -| ------------------------- | ------------- | ---------------------------------------------------------------------------------- | -| `hostname` | " " | `string` - the Fully Qualified Domain Name for your Rancher Server | -| `ingress.tls.source` | "rancher" | `string` - Where to get the cert for the ingress. - "rancher, letsEncrypt, secret" | -| `letsEncrypt.email` | " " | `string` - Your email address | -| `letsEncrypt.environment` | "production" | `string` - Valid options: "staging, production" | -| `privateCA` | false | `bool` - Set to true if your cert is signed by a private CA | - -
- -### Advanced Options - -| Option | Default Value | Description | -| ------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) | -| `addLocal` | "auto" | `string` - Have Rancher detect and import the "local" Rancher server cluster [Import "local Cluster](#import-local-cluster) | -| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | -| `replicas` | 3 | `int` - Number of replicas of Rancher pods | -| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | -| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.level` | 0 | `int` - set the [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing) level. 0 is off. 
[0-3] | -| `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxBackups` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | -| `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs _Note: Available as of v2.2.0_ | -| `debug` | false | `bool` - set debug flag on rancher server | -| `certmanager.version` | "" | `string` - set cert-manager compatibility | -| `extraEnv` | [] | `list` - set additional environment variables for Rancher _Note: Available as of v2.2.0_ | -| `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | -| `ingress.extraAnnotations` | {} | `map` - additional annotations to customize the ingress | -| `ingress.configurationSnippet` | "" | `string` - Add additional Nginx configuration. Can be used for proxy configuration. 
_Note: Available as of v2.0.15, v2.1.10 and v2.2.4_ | -| `letsEncrypt.ingress.class` | "" | `string` - optional ingress class for the cert-manager acmesolver ingress that responds to the Let's Encrypt ACME challenges | -| `proxy` | "" | `string` - HTTP[S] proxy server for Rancher | -| `noProxy` | "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" | `string` - comma separated list of hostnames or ip address not to use the proxy | -| `resources` | {} | `map` - rancher pod resource requests & limits | -| `rancherImage` | "rancher/rancher" | `string` - rancher image source | -| `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | -| `rancherImagePullPolicy` | "IfNotPresent" | `string` - Override imagePullPolicy for rancher server images - "Always", "Never", "IfNotPresent" | -| `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" | -| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system Docker images, e.g., http://registry.example.com/ | -| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. | - -
- -### API Audit Log - -Enabling the [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing/). - -You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) for the `System` Project on the Rancher server cluster. - -```plain ---set auditLog.level=1 -``` - -By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) for the Rancher server cluster or System Project. - -Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. - -### Setting Extra Environment Variables - -You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values. - -```plain ---set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' ---set 'extraEnv[0].value=1.0' -``` - -### TLS Settings - -To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: - -```plain ---set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' ---set 'extraEnv[0].value=1.0' -``` - -See [TLS settings]({{}}/rancher/v2.x/en/admin-settings/tls-settings) for more information and options. 
- -### Import `local` Cluster - -By default Rancher server will detect and import the `local` cluster it's running on. User with access to the `local` cluster will essentially have "root" access to all the clusters managed by Rancher server. - -If this is a concern in your environment you can set this option to "false" on your initial install. - -> Note: This option is only effective on the initial Rancher install. See [Issue 16522](https://github.com/rancher/rancher/issues/16522) for more information. - -```plain ---set addLocal="false" -``` - -### Customizing your Ingress - -To customize or use a different ingress with Rancher server you can set your own Ingress annotations. - -Example on setting a custom certificate issuer: - -```plain ---set ingress.extraAnnotations.'certmanager\.k8s\.io/cluster-issuer'=ca-key-pair -``` - -Example on setting a static proxy header with `ingress.configurationSnippet`. This value is parsed like a template so variables can be used. - -```plain ---set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};' -``` - -### HTTP Proxy - -Rancher requires internet access for some functionality (helm charts). Use `proxy` to set your proxy server. - -Add your IP exceptions to the `noProxy` list. Make sure you add the Service cluster IP range (default: 10.43.0.1/16) and any worker cluster `controlplane` nodes. Rancher supports CIDR notation ranges in this list. - -```plain ---set proxy="http://:@:/" ---set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16" -``` - -### Additional Trusted CAs - -If you have private registries, catalogs or a proxy that intercepts certificates, you may need to add additional trusted CAs to Rancher. - -```plain ---set additionalTrustedCAs=true -``` - -Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. 
- -```plain -kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem -``` - -### Private Registry and Air Gap Installs - -For details on installing Rancher with a private registry, see: - -- [Air Gap: Docker Install]({{}}/rancher/v2.x/en/installation/air-gap-single-node/) -- [Air Gap: Kubernetes Install]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/) - -### External TLS Termination - -We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. - -You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. - -> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate]({{}}/rancher/v2.x/en/installation/options/tls-secrets/#using-a-private-ca-signed-certificate) to add the CA cert for Rancher. - -Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. - -#### Configuring Ingress for External TLS when Using NGINX v0.25 - -In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. 
Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: - -```yaml -ingress: - provider: nginx - options: - use-forwarded-headers: 'true' -``` - -#### Required Headers - -- `Host` -- `X-Forwarded-Proto` -- `X-Forwarded-Port` -- `X-Forwarded-For` - -#### Recommended Timeouts - -- Read Timeout: `1800 seconds` -- Write Timeout: `1800 seconds` -- Connect Timeout: `30 seconds` - -#### Health Checks - -Rancher will respond `200` to health checks on the `/healthz` endpoint. - -#### Example NGINX config - -This NGINX configuration is tested on NGINX 1.14. - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). - -- Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. -- Replace both occurrences of `FQDN` to the DNS name for Rancher. -- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. 
- -``` -worker_processes 4; -worker_rlimit_nofile 40000; - -events { - worker_connections 8192; -} - -http { - upstream rancher { - server IP_NODE_1:80; - server IP_NODE_2:80; - server IP_NODE_3:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } -} -``` diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/choosing-version/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/choosing-version/_index.md deleted file mode 100644 index 36edeae1f5a..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/choosing-version/_index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Choosing a Rancher Version -weight: 1 ---- - -This section describes how to choose a Rancher version. - -For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. 
- -For Docker installations of Rancher, which is used for development and testing, you will install Rancher as a **Docker image.** - -{{% tabs %}} -{{% tab "Helm Charts" %}} - -When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster]({{}}/rancher/v2.x/en/installation/k8s-install/), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. - -Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -### Helm Chart Repositories - -Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a Docker installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. - -| Type | Command to Add the Repo | Description of the Repo | -| -------------- | ------------ | ----------------- | -| rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. | -| rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. 
| -| rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | Adds a repository of Helm charts for alpha versions of Rancher for previewing upcoming releases. These releases are discouraged in production environments. Upgrades _to_ or _from_ charts in the rancher-alpha repository to any other chart, regardless or repository, aren't supported. | - -
-Instructions on when to select these repos are available below in [Switching to a Different Helm Chart Repository](#switching-to-a-different-helm-chart-repository). - -> **Note:** The introduction of the `rancher-latest` and `rancher-stable` Helm Chart repositories was introduced after Rancher v2.1.0, so the `rancher-stable` repository contains some Rancher versions that were never marked as `rancher/rancher:stable`. The versions of Rancher that were tagged as `rancher/rancher:stable` prior to v2.1.0 are v2.0.4, v2.0.6, v2.0.8. Post v2.1.0, all charts in the `rancher-stable` repository will correspond with any Rancher version tagged as `stable`. - -### Helm Chart Versions - -Rancher Helm chart versions match the Rancher version (i.e `appVersion`). - -For the Rancher v2.1.x versions, there were some Helm charts, that were using a version that was a build number, i.e. `yyyy.mm.`. These charts have been replaced with the equivalent Rancher version and are no longer available. - -### Switching to a Different Helm Chart Repository - -After installing Rancher, if you want to change which Helm chart repository to install Rancher from, you will need to follow these steps. - -> **Note:** Because the rancher-alpha repository contains only alpha charts, switching between the rancher-alpha repository and the rancher-stable or rancher-latest repository for upgrades is not supported. - -{{< release-channel >}} - -1. List the current Helm chart repositories. - - ```plain - helm repo list - - NAME URL - stable https://kubernetes-charts.storage.googleapis.com - rancher- https://releases.rancher.com/server-charts/ - ``` - -2. Remove the existing Helm Chart repository that contains your charts to install Rancher, which will either be `rancher-stable` or `rancher-latest` depending on what you had initially added. - - ```plain - helm repo remove rancher- - ``` - -3. Add the Helm chart repository that you want to start installing Rancher from. 
- - ```plain - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -4. Continue to follow the steps to [upgrade Rancher]({{}}/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/) from the new Helm chart repository. -{{% /tab %}} -{{% tab "Docker Images" %}} -When performing [Docker installs]({{}}/rancher/v2.x/en/installation/single-node), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. - -### Server Tags - -Rancher Server is distributed as a Docker image, which have tags attached to them. You can specify this tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise, any image cached on the host will be used. - -| Tag | Description | -| -------------------------- | ------ | -| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. | -| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. | -| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. | - -> **Notes:** -> -> - The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. -> - Want to install an alpha review for preview? Install using one of the alpha tags listed on our [announcements page](https://forums.rancher.com/c/announcements) (e.g., `v2.2.0-alpha1`). Caveat: Alpha releases cannot be upgraded to or from any other release. 
- -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/helm-version/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/helm-version/_index.md deleted file mode 100644 index 3a91781871a..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/helm-version/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Helm Version Requirements -weight: 3 ---- - -This section contains the requirements for Helm, which is the tool used to install Rancher on a high-availability Kubernetes cluster. - -> The installation instructions have been updated for Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 Migration Docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) [This section]({{}}/rancher/v2.x/en/installation/options/helm2) provides a copy of the older high-availability Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -- Helm v2.16.0 or higher is required for Kubernetes v1.16. For the default Kubernetes version, refer to the [release notes](https://github.com/rancher/rke/releases) for the version of RKE that you are using. -- Helm v2.15.0 should not be used, because of an issue with converting/comparing numbers. -- Helm v2.12.0 should not be used, because of an issue with `cert-manager`. diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/_index.md deleted file mode 100644 index 5743469d2de..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Resources and Other Installation Methods -weight: 3 ---- - -> This section is under construction. 
- -### Docker Installations - -The [single-node Docker installation]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. - -Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -### Air Gapped Installations - -Follow [these steps]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) to install the Rancher server in an air gapped environment. - -An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -### Advanced Options - -When installing Rancher, there are several advanced options that can be enabled during installation. Within each install guide, these options are presented. 
Learn more about these options: - -| Advanced Option | Available as of | -| ----------------------------------------------------------------------------------------------------------------------- | --------------- | -| [Custom CA Certificate]({{}}/rancher/v2.x/en/installation/options/custom-ca-root-certificate/) | v2.0.0 | -| [API Audit Log]({{}}/rancher/v2.x/en/installation/options/api-audit-log/) | v2.0.0 | -| [TLS Settings]({{}}/rancher/v2.x/en/installation/options/tls-settings/) | v2.1.7 | -| [etcd configuration]({{}}/rancher/v2.x/en/installation/options/etcd/) | v2.2.0 | -| [Local System Charts for Air Gap Installations]({{}}/rancher/v2.x/en/installation/options/local-system-charts) | v2.3.0 | diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/_index.md deleted file mode 100644 index 6087cf17317..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Advanced -weight: 5 ---- - -> This section is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/api-audit-log/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/api-audit-log/_index.md deleted file mode 100644 index d68051fc7bb..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/api-audit-log/_index.md +++ /dev/null @@ -1,577 +0,0 @@ ---- -title: Enabling the API Audit Log to Record System Events -weight: 4 ---- - -You can enable the API audit log to record the sequence of system events initiated by individual users. You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. 
- -You can enable API Auditing during Rancher installation or upgrade. - -## Enabling API Audit Log - -The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. - -- [Docker Install]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#api-audit-log) - -- [Kubernetes Install]({{}}/rancher/v2.x/en/installation/options/chart-options/#api-audit-log) - -## API Audit Log Options - -The usage below defines rules about what the audit log should record and what data it should include: - -| Parameter | Description | -| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `AUDIT_LEVEL` | `0` - Disable audit log (default setting).
`1` - Log event metadata.
`2` - Log event metadata and request body.
`3` - Log event metadata, request body, and response body. Each log transaction for a request/response pair uses the same `auditID` value.

See [Audit Level Logging](#audit-log-levels) for a table that displays what each setting logs. | -| `AUDIT_LOG_PATH` | Log path for Rancher Server API. Default path is `/var/log/auditlog/rancher-api-audit.log`. You can mount the log directory to host.

Usage Example: `AUDIT_LOG_PATH=/my/custom/path/`
| -| `AUDIT_LOG_MAXAGE` | Defined the maximum number of days to retain old audit log files. Default is 10 days. | -| `AUDIT_LOG_MAXBACKUP` | Defines the maximum number of audit log files to retain. Default is 10. | -| `AUDIT_LOG_MAXSIZE` | Defines the maximum size in megabytes of the audit log file before it gets rotated. Default size is 100M. | - -
- -### Audit Log Levels - -The following table displays what parts of API transactions are logged for each [`AUDIT_LEVEL`](#audit-level) setting. - -| `AUDIT_LEVEL` Setting | Request Metadata | Request Body | Response Metadata | Response Body | -| --------------------- | ---------------- | ------------ | ----------------- | ------------- | -| `0` | | | | | -| `1` | ✓ | | | | -| `2` | ✓ | ✓ | | | -| `3` | ✓ | ✓ | ✓ | ✓ | - -## Viewing API Audit Logs - -### Docker Install - -Share the `AUDIT_LOG_PATH` directory (Default: `/var/log/auditlog`) with the host system. The log can be parsed by standard CLI tools or forwarded on to a log collection tool like Fluentd, Filebeat, Logstash, etc. - -### Kubernetes Install - -Enabling the API Audit Log with the Helm chart install will create a `rancher-audit-log` sidecar container in the Rancher pod. This container will stream the log to standard output (stdout). You can view the log as you would any container log. - -The `rancher-audit-log` container is part of the `rancher` pod in the `cattle-system` namespace. - -#### CLI - -```bash -kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log -``` - -#### Rancher Web GUI - -1. From the context menu, select **Cluster: local > System**. - - ![Local Cluster: System Project]({{}}/img/rancher/audit_logs_gui/context_local_system.png) - -1. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Find the `cattle-system` namespace. Open the `rancher` workload by clicking its link. - - ![Rancher Workload]({{}}/img/rancher/audit_logs_gui/rancher_workload.png) - -1. Pick one of the `rancher` pods and select **⋮ > View Logs**. - - ![View Logs]({{}}/img/rancher/audit_logs_gui/view_logs.png) - -1. From the **Logs** drop-down, select `rancher-audit-log`. 
- - ![Select Audit Log]({{}}/img/rancher/audit_logs_gui/rancher_audit_log_container.png) - -#### Shipping the Audit Log - -You can enable Rancher's built in log collection and shipping for the cluster to ship the audit and other services logs to a supported collection endpoint. See [Rancher Tools - Logging]({{}}/rancher/v2.x/en/cluster-admin/tools/logging) for details. - -## Audit Log Samples - -After you enable auditing, each API request or response is logged by Rancher in the form of JSON. Each of the following code samples provide examples of how to identify each API transaction. - -### Metadata Level - -If you set your `AUDIT_LEVEL` to `1`, Rancher logs the metadata header for every API request, but not the body. The header provides basic information about the API transaction, such as the transaction's ID, who initiated the transaction, the time it occurred, etc. - -```json -{ - "auditID": "30022177-9e2e-43d1-b0d0-06ef9d3db183", - "requestURI": "/v3/schemas", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "GET", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:22:43 +0800" -} -``` - -### Metadata and Request Body Level - -If you set your `AUDIT_LEVEL` to `2`, Rancher logs the metadata header and body for every API request. - -The code sample below depicts an API request, with both its metadata header and body. 
- -```json -{ - "auditID": "ef1d249e-bfac-4fd0-a61f-cbdcad53b9bb", - "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "PUT", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:28:08 +0800", - "requestBody": { - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "paused": false, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements", - "requests": {}, - "limits": {} - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container", - "environmentFrom": [], - "capAdd": [], - "capDrop": [], - "livenessProbe": null, - "volumeMounts": [] - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": 
"Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "name": "nginx", - "namespaceId": "default", - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport", - "type": "publicEndpoint" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "scheduling": { - "node": {} - }, - "description": "my description", - "volumes": [] - } -} -``` - -### Metadata, Request Body, and Response Body Level - -If you set your `AUDIT_LEVEL` to `3`, Rancher logs: - -- The metadata header and body for every API request. -- The metadata header and body for every API response. 
- -#### Request - -The code sample below depicts an API request, with both its metadata header and body. - -```json -{ - "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", - "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "PUT", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:33:06 +0800", - "requestBody": { - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "paused": false, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements", - "requests": {}, - "limits": {} - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container", - "environmentFrom": [], - "capAdd": [], - "capDrop": [], - "livenessProbe": null, - "volumeMounts": [] - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": 
"Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "name": "nginx", - "namespaceId": "default", - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport", - "type": "publicEndpoint" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "scheduling": { - "node": {} - }, - "description": "my decript", - "volumes": [] - } -} -``` - -#### Response - -The code sample below depicts an API response, with both its metadata header and body. 
- -```json -{ - "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", - "responseStatus": "200", - "stage": "ResponseComplete", - "stageTimestamp": "2018-07-20 10:33:06 +0800", - "responseBody": { - "actionLinks": { - "pause": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=pause", - "resume": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=resume", - "rollback": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=rollback" - }, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements" - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container" - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - 
"lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "links": { - "remove": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "revisions": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/revisions", - "self": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "update": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "yaml": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/yaml" - }, - "name": "nginx", - "namespaceId": "default", - "paused": false, - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": 
"f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - } - } -} -``` diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/arm64-platform/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/arm64-platform/_index.md deleted file mode 100644 index b0a7f913c38..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/arm64-platform/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Running on ARM64 (Experimental) -weight: 3 ---- - -> **Important:** -> -> Running on an ARM64 platform is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using ARM64 based nodes in a production environment. - -The following options are available when using an ARM64 platform: - -- Running Rancher on ARM64 based node(s) - - Only [Docker Install]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) -- Create custom cluster and adding ARM64 based node(s) - - Kubernetes cluster version must be 1.12 or higher - - CNI Network Provider must be [Flannel]({{}}/rancher/v2.x/en/faq/networking/cni-providers/#flannel) -- Importing clusters that contain ARM64 based nodes - - Kubernetes cluster version must be 1.12 or higher - -Please see [Cluster Options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/) how to configure the cluster options. 
- -The following features are not tested: - -- Monitoring, alerts, notifiers, pipelines and logging -- Launching apps from the catalog diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/etcd/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/etcd/_index.md deleted file mode 100644 index a605c7343aa..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/etcd/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Tuning etcd for Large Installations -weight: 2 ---- - -When running larger Rancher installations with 15 or more clusters it is recommended to increase the default keyspace for etcd from the default 2GB. The maximum setting is 8GB and the host should have enough RAM to keep the entire dataset in memory. When increasing this value you should also increase the size of the host. The keyspace size can also be adjusted in smaller installations if you anticipate a high rate of change of pods during the garbage collection interval. - -The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) setting on the etcd servers. 
 - -### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB - -```yaml -# RKE cluster.yml ---- -services: - etcd: - extra_args: - quota-backend-bytes: 5368709120 -``` - -## Scaling etcd disk performance - -You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.4.0/tuning/#disk) on how to tune the disk priority on the host. - -Additionally, to reduce IO contention on the disks for etcd, you can use a dedicated device for the data and wal directory. Based on etcd best practices, mirroring RAID configurations are unnecessary because etcd replicates data between the nodes in the cluster. You can use striping RAID configurations to increase available IOPS. - -To implement this solution in an RKE cluster, the `/var/lib/etcd/data` and `/var/lib/etcd/wal` directories will need to have disks mounted and formatted on the underlying host. In the `extra_args` directive of the `etcd` service, you must include the `wal_dir` directory. Without specifying the `wal_dir`, the etcd process will try to manipulate the underlying `wal` mount with insufficient permissions. 
- -```yaml -# RKE cluster.yml ---- -services: - etcd: - extra_args: - data-dir: '/var/lib/rancher/etcd/data/' - wal-dir: '/var/lib/rancher/etcd/wal/wal_dir' - extra_binds: - - '/var/lib/etcd/data:/var/lib/rancher/etcd/data' - - '/var/lib/etcd/wal:/var/lib/rancher/etcd/wal' -``` diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/firewall/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/firewall/_index.md deleted file mode 100644 index 7386ba0880e..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/advanced/firewall/_index.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: Opening Ports with firewalld -weight: 1 ---- - -Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. - -For example, one Oracle Linux image in AWS has REJECT rules that stop Helm from communicating with Tiller: - -``` -Chain INPUT (policy ACCEPT) -target prot opt source destination -ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED -ACCEPT icmp -- anywhere anywhere -ACCEPT all -- anywhere anywhere -ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh -REJECT all -- anywhere anywhere reject-with icmp-host-prohibited - -Chain FORWARD (policy ACCEPT) -target prot opt source destination -REJECT all -- anywhere anywhere reject-with icmp-host-prohibited - -Chain OUTPUT (policy ACCEPT) -target prot opt source destination -``` - -You can check the default firewall rules with this command: - -``` -sudo iptables --list -``` - -This section describes how to use `firewalld` to apply the [firewall port rules]({{}}/rancher/v2.x/en/installation/references) for nodes in a high-availability Rancher server cluster. 
 - -# Prerequisite - -Install v7.x or later of `firewalld`: - -``` -yum install firewalld -systemctl start firewalld -systemctl enable firewalld -``` - -# Applying Firewall Port Rules - -In the Rancher high-availability installation instructions, the Rancher server is set up on three nodes that have all three Kubernetes roles: etcd, controlplane, and worker. If your Rancher server nodes have all three roles, run the following commands on each node: - -``` -firewall-cmd --permanent --add-port=22/tcp -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=2379/tcp -firewall-cmd --permanent --add-port=2380/tcp -firewall-cmd --permanent --add-port=6443/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp -firewall-cmd --permanent --add-port=30000-32767/udp -``` -If your Rancher server nodes have separate roles, use the following commands based on the role of the node: - -``` -# For etcd nodes, run the following commands: -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=2379/tcp -firewall-cmd --permanent --add-port=2380/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp - -# For control plane nodes, run the following commands: -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=6443/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp 
-firewall-cmd --permanent --add-port=30000-32767/udp - -# For worker nodes, run the following commands: -firewall-cmd --permanent --add-port=22/tcp -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp -firewall-cmd --permanent --add-port=30000-32767/udp -``` - -After the `firewall-cmd` commands have been run on a node, use the following command to enable the firewall rules: - -``` -firewall-cmd --reload -``` - -**Result:** The firewall is updated so that Helm can communicate with the Rancher server nodes. \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/_index.md deleted file mode 100644 index 9287b5f398a..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Installing Rancher in an Air Gapped Environment -weight: 2 ---- - -> This section is under construction. - -This section is about installations of Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container. - -For more information on each installation option, refer to [this page.]({{}}/rancher/v2.x/en/installation/) - -Throughout the installation instructions, there will be _tabs_ for each installation option. 
- -> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation. - -# Installation Outline - -1. [Set up private registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/) -2. [Collect and publish images to your private registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) -3. [Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/) - -### [Next: Prepare your Node(s)]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/) diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/install-rancher/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/install-rancher/_index.md deleted file mode 100644 index d631b42f51d..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/install-rancher/_index.md +++ /dev/null @@ -1,343 +0,0 @@ ---- -title: 3. Install Rancher -weight: 400 ---- - -This section is about how to deploy Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. - -> **Note:** These installation instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.x/en/installation/options/air-gap-helm2) provides a copy of the older air gap installation instructions for Rancher installed on Kubernetes with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. 
- - -{{% tabs %}} -{{% tab "Kubernetes Install (Recommended)" %}} - -Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. - -This section describes installing Rancher in five parts: - -- [A. Add the Helm Chart Repository](#a-add-the-helm-chart-repository) -- [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration) -- [C. Render the Rancher Helm Template](#c-render-the-rancher-helm-template) -- [D. Install Rancher](#d-install-rancher) -- [E. For Rancher versions prior to v2.3.0, Configure System Charts](#e-for-rancher-versions-prior-to-v2-3-0-configure-system-charts) - - -### Prerequisite - -Start with any Kubernetes cluster. - -This cluster can use upstream Kubernetes, or it can use one of Rancher's Kubernetes distributions, or it can be a managed Kubernetes cluster from a provider such as Amazon EKS. - -> **Need help installing Kubernetes?** -> -> Our documentation contains instructions for installing Kubernetes with one of Rancher's distributions: -> -> - [Install Kubernetes with the Rancher CLI (recommended for ease of use)]({{}}/rancher/v2.5/en/cli) -> - Install a K3s Kubernetes cluster -> - Install an RKE Kubernetes cluster - -### A. Add the Helm Chart Repository - -From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. - -1. If you haven't already, install `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -2. 
Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). - {{< release-channel >}} - ``` - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. -```plain -helm fetch rancher-/rancher -``` - -> Want additional options? Need help troubleshooting? See [Kubernetes Install: Advanced Options]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/#advanced-configurations). - -### B. Choose your SSL Configuration - -Rancher Server is designed to be secure by default and requires SSL/TLS configuration. - -When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. - -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination). - -| Configuration | Chart option | Description | Requires cert-manager | -| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | -| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** and does not need to be added when rendering the Helm template. | yes | -| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no | - -### C. Render the Rancher Helm Template - -When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. - -| Chart Option | Chart Value | Description | -| ----------------------- | -------------------------------- | ---- | -| `certmanager.version` | "" | Configure proper Rancher TLS issuer depending of running cert-manager version. | -| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | - -Based on the choice your made in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), complete one of the procedures below. - -{{% accordion id="self-signed" label="Option A-Default Self-Signed Certificate" %}} - -By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. - -> **Note:** -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - -1. From a system connected to the internet, add the cert-manager repo to Helm. - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. 
Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v0.12.0 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - ```plain - helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ - --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller \ - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - ``` -1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - - - Placeholder | Description - ------------|------------- - `` | The version number of the output tarball. - `` | The DNS name you pointed at your load balancer. - `` | The DNS name for your private registry. - `` | Cert-manager version running on k8s cluster. - - ```plain - helm template rancher ./rancher-.tgz --output-dir . 
\ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` - -{{% /accordion %}} - -{{% accordion id="secret" label="Option B: Certificates From Files using Kubernetes Secrets" %}} - -Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. - -Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------- | -| `` | The version number of the output tarball. | -| `` | The DNS name you pointed at your load balancer. | -| `` | The DNS name for your private registry. | - -```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain - helm template rancher ./rancher-.tgz --output-dir . 
\ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set privateCA=true \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` - -Then refer to [Adding TLS Secrets]({{}}/rancher/v2.x/en/installation/options/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. - -{{% /accordion %}} - -### D. Install Rancher - -Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. - -Use `kubectl` to create namespaces and apply the rendered manifests. - -If you chose to use self-signed certificates in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), install cert-manager. - -{{% accordion id="install-cert-manager" label="Self-Signed Certificate Installs - Install Cert-manager" %}} - -If you are using self-signed certificates, install cert-manager: - -1. Create the namespace for cert-manager. -```plain -kubectl create namespace cert-manager -``` - -1. Create the cert-manager CustomResourceDefinitions (CRDs). -```plain -kubectl apply -f cert-manager/cert-manager-crd.yaml -``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Launch cert-manager. 
-```plain -kubectl apply -R -f ./cert-manager -``` - -{{% /accordion %}} - -Install Rancher: - -```plain -kubectl create namespace cattle-system -kubectl -n cattle-system apply -R -f ./rancher -``` - -**Step Result:** If you are installing Rancher v2.3.0+, the installation is complete. - -### E. For Rancher versions prior to v2.3.0, Configure System Charts - -If you are installing Rancher versions prior to v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0). - -### Additional Resources - -These resources could be helpful when installing Rancher: - -- [Rancher Helm chart options]({{}}/rancher/v2.x/en/installation/options/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.x/en/installation/options/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) - -{{% /tab %}} -{{% tab "Docker Install" %}} - -The Docker installation is for Rancher users that are wanting to **test** out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. **Important: If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker installation to a Kubernetes Installation.** Instead of running the single node installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. 
Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -| Environment Variable Key | Environment Variable Value | Description | -| -------------------------------- | -------------------------------- | ---- | -| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | - -> **Do you want to...** -> -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.x/en/installation/options/custom-ca-root-certificate/). -> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#api-audit-log). - -- For Rancher prior to v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. 
For details, refer to the documentation on [setting up the system charts for Rancher prior to v2.3.0.]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0) - -Choose from the following options: - -{{% accordion id="option-a" label="Option A-Default Self-Signed Certificate" %}} - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. | - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Self-Signed" %}} - -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - -> **Prerequisites:** -> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. 
-> -> - The certificate files must be in [PEM format]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#pem). -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#cert-order). - -After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | The path to the certificate authority's certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. 
| - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} -{{% accordion id="option-c" label="Option C-Bring Your Own Certificate: Signed by Recognized CA" %}} - -In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - -> **Prerequisite:** The certificate files must be in [PEM format]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#pem). - -After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. | - -> **Note:** Use `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher. 
- -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --no-cacerts \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} - -If you are installing Rancher v2.3.0+, the installation is complete. - -If you are installing Rancher versions prior to v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0). - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/local-system-charts/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/local-system-charts/_index.md deleted file mode 100644 index 36df2bf2e54..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/local-system-charts/_index.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Setting up Local System Charts for Air Gapped Installations -weight: 1120 ---- - -The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. - -In an air gapped installation of Rancher, you will need to configure Rancher to use a local copy of the system charts. This section describes how to use local system charts using a CLI flag in Rancher v2.3.0, and using a Git mirror for Rancher versions prior to v2.3.0. 
- -# Using Local System Charts in Rancher v2.3.0 - -In Rancher v2.3.0, a local copy of `system-charts` has been packaged into the `rancher/rancher` container. To be able to use these features in an air gap install, you will need to run the Rancher install command with an extra environment variable, `CATTLE_SYSTEM_CATALOG=bundled`, which tells Rancher to use the local copy of the charts instead of attempting to fetch them from GitHub. - -Example commands for a Rancher installation with a bundled `system-charts` are included in the [air gap Docker installation]({{}}/rancher/v2.x/en/installation/air-gap-single-node/install-rancher) instructions and the [air gap Kubernetes installation]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/install-rancher/#c-install-rancher) instructions. - -# Setting Up System Charts for Rancher Prior to v2.3.0 - -### A. Prepare System Charts - -The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach and configure Rancher to use that repository. - -Refer to the release notes in the `system-charts` repository to see which branch corresponds to your version of Rancher. - -### B. Configure System Charts - -Rancher needs to be configured to use your Git mirror of the `system-charts` repository. You can configure the system charts repository either from the Rancher UI or from Rancher's API view. - -{{% tabs %}} -{{% tab "Rancher UI" %}} - -In the catalog management page in the Rancher UI, follow these steps: - -1. Go to the **Global** view. - -1. Click **Tools > Catalogs.** - -1. The system chart is displayed under the name `system-library`. To edit the configuration of the system chart, click **⋮ > Edit.** - -1. 
In the **Catalog URL** field, enter the location of the Git mirror of the `system-charts` repository. - -1. Click **Save.** - -**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. - -{{% /tab %}} -{{% tab "Rancher API" %}} - -1. Log into Rancher. - -1. Open `https:///v3/catalogs/system-library` in your browser. - - {{< img "/img/rancher/airgap/system-charts-setting.png" "Open">}} - -1. Click **Edit** on the upper right corner and update the value for **url** to the location of the Git mirror of the `system-charts` repository. - - {{< img "/img/rancher/airgap/system-charts-update.png" "Update">}} - -1. Click **Show Request** - -1. Click **Send Request** - -**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/populate-private-registry/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/populate-private-registry/_index.md deleted file mode 100644 index 1d063572262..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/populate-private-registry/_index.md +++ /dev/null @@ -1,275 +0,0 @@ ---- -title: '2. Collect and Publish Images to your Private Registry' -weight: 200 ---- - -This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. - -By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/) or launch any [tools]({{}}/rancher/v2.x/en/cluster-admin/tools/) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gapped installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. 
- -Populating the private registry with images is the same process for installing Rancher with Docker and for installing Rancher on a Kubernetes cluster. - -The steps in this section differ depending on whether or not you are planning to use Rancher to provision a downstream cluster with Windows nodes or not. By default, we provide the steps of how to populate your private registry assuming that Rancher will provision downstream Kubernetes clusters with only Linux nodes. But if you plan on provisioning any [downstream Kubernetes clusters using Windows nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed. - -> **Prerequisites:** You must have a [private registry](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) available to use. - -{{% tabs %}} -{{% tab "Linux Only Clusters" %}} - -For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. - -1. [Find the required assets for your Rancher version](#1-find-the-required-assets-for-your-rancher-version) -2. [Collect the cert-manager image](#2-collect-the-cert-manager-image) (unless you are bringing your own certificates or terminating TLS on a load balancer) -3. [Save the images to your workstation](#3-save-the-images-to-your-workstation) -4. [Populate the private registry](#4-populate-the-private-registry) - -### Prerequisites - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -If you will use ARM64 hosts, the registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - -### 1. Find the required assets for your Rancher version - -1. 
Go to our [releases page,](https://github.com/rancher/rancher/releases) find the Rancher v2.x.x release that you want to install, and click **Assets.** Note: Don't use releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: - -| Release File | Description | -| ---------------- | -------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - -### 2. Collect the cert-manager image - -> Skip this step if you are using your own certificates, or if you are terminating TLS on an external load balancer. - -In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. - -1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v0.12.0 - helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt - ``` - -2. 
Sort and unique the images list to remove any overlap between the sources: - - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - -### 3. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - -### 4. Populate the private registry - -Next, you will move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. - -Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. - -The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. - -1. Log into your private registry if required: - ```plain - docker login - ``` -1. Make `rancher-load-images.sh` an executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt --registry - ``` -{{% /tab %}} -{{% tab "Linux and Windows Clusters" %}} - -For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. 
Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. - -### Windows Steps - -The Windows images need to be collected and pushed from a Windows server workstation. - -A. Find the required assets for your Rancher version
-B. Save the images to your Windows Server workstation
-C. Prepare the Docker daemon
-D. Populate the private registry - -{{% accordion label="Collecting and Populating Windows Images into the Private Registry"%}} - -### Prerequisites - -These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - -Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - -### A. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's "Assets" section, download the following files: - -| Release File | Description | -|----------------------------|------------------| -| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | -| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | -| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | - -### B. Save the images to your Windows Server workstation - -1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. - -1. Run `rancher-save-images.ps1` to create a tarball of all the required images: - ```plain - ./rancher-save-images.ps1 - ``` - - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. 
When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. - -### C. Prepare the Docker daemon - -Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base images of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. - - ``` - { - ... - "allow-nondistributable-artifacts": [ - ... - "" - ] - ... - } - ``` - -### D. Populate the private registry - -Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. - -The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. The `rancher-windows-images.tar.gz` should also be in the same directory. - -1. Using `powershell`, log into your private registry if required: - ```plain - docker login - ``` - -1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.ps1 --registry - ``` - -{{% /accordion %}} - -### Linux Steps - -The Linux images need to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. These steps are different from the Linux only steps as the Linux images that are pushed will actually be manifests that support Windows and Linux images. - -A. Find the required assets for your Rancher version
-B. Collect all the required images
-C. Save the images to your Linux workstation
-D. Populate the private registry - -{{% accordion label="Collecting and Populating Linux Images into the Private Registry" %}} - -### Prerequisites - -You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - -### A. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets.** - -2. From the release's **Assets** section, download the following files: - -| Release File | Description | -|----------------------------| -------------------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - -### B. 
Collect all the required images - -**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. Skip this step if you are using your own certificates. - - 1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v0.12.0 - helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt - ``` - - 2. Sort and unique the images list to remove any overlap between the sources: - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - -### C. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - -### D. Populate the private registry - -Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh` script to load the images. 
- -The image list, `rancher-images.txt` or `rancher-windows-images.txt`, is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. - -1. Log into your private registry if required: - ```plain - docker login - ``` - -1. Make `rancher-load-images.sh` an executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt \ - --windows-image-list ./rancher-windows-images.txt \ - --registry - ``` - -{{% /accordion %}} - -{{% /tab %}} -{{% /tabs %}} - -### [Next step for Kubernetes Installs - Launch a Kubernetes Cluster]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/) - -### [Next step for Docker Installs - Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/) diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/prepare-registry/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/prepare-registry/_index.md deleted file mode 100644 index a860bf48a9b..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/air-gap/prepare-registry/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: '1. Set up Private Registry' -weight: 100 ---- - -> This page is under construction. - -In this section, you will provision the underlying infrastructure for your Rancher management server in an air gapped environment. You will also set up the private Docker registry that must be available to your Rancher node(s). - -An air gapped environment is an environment where the Rancher server is installed offline or behind a firewall. 
- -The infrastructure depends on whether you are installing Rancher on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. For more information on each installation option, refer to [this page.]({{}}/rancher/v2.x/en/installation/) - - - -### Set up a Private Docker Registry - -Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file]({{}}/k3s/latest/en/installation/private-registry/) with details from this registry. - -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) - - -### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/_index.md deleted file mode 100644 index c49e516a62b..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Encryption -weight: 3 ---- - -> This section is under construction. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/custom-ca-root-certificate/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/custom-ca-root-certificate/_index.md deleted file mode 100644 index 924bb8a8203..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/custom-ca-root-certificate/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: About Custom CA Root Certificates -weight: 1 ---- - -If you're using Rancher in an internal production environment where you aren't exposing apps publicly, use a certificate from a private certificate authority (CA). - -Services that Rancher needs to access are sometimes configured with a certificate from a custom/internal CA root, also known as self signed certificate. If the presented certificate from the service cannot be validated by Rancher, the following error displays: `x509: certificate signed by unknown authority`. - -To validate the certificate, the CA root certificates need to be added to Rancher. As Rancher is written in Go, we can use the environment variable `SSL_CERT_DIR` to point to the directory where the CA root certificates are located in the container. The CA root certificates directory can be mounted using the Docker volume option (`-v host-source-directory:container-destination-directory`) when starting the Rancher container. 
- -Examples of services that Rancher can access: - -- Catalogs -- Authentication providers -- Accessing hosting/cloud API when using Node Drivers - -## Installing with the custom CA Certificate - -For details on starting a Rancher container with your private CA certificates mounted, refer to the installation docs: - -- [Docker Install]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#custom-ca-certificate) - -- [Kubernetes Install]({{}}/rancher/v2.x/en/installation/options/chart-options/#additional-trusted-cas) - diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/tls-secrets/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/tls-secrets/_index.md deleted file mode 100644 index 2bc835cf922..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/tls-secrets/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Adding TLS Secrets -weight: 2 ---- - -Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key. - -Combine the server certificate followed by any intermediate certificate(s) needed into a file named `tls.crt`. Copy your certificate key into a file named `tls.key`. - -For example, [acme.sh](https://acme.sh) provides server certificate and CA chains in `fullchain.cer` file. -This `fullchain.cer` should be renamed to `tls.crt` & certificate key file as `tls.key`. - -Use `kubectl` with the `tls` secret type to create the secrets. - -``` -kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key -``` - -> **Note:** If you want to replace the certificate, you can delete the `tls-rancher-ingress` secret using `kubectl -n cattle-system delete secret tls-rancher-ingress` and add a new one using the command shown above. 
If you are using a private CA signed certificate, replacing the certificate is only possible if the new certificate is signed by the same CA as the certificate currently in use. - -### Using a Private CA Signed Certificate - -If you are using a private CA, Rancher requires a copy of the CA certificate which is used by the Rancher Agent to validate the connection to the server. - -Copy the CA certificate into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. - ->**Important:** Make sure the file is called `cacerts.pem` as Rancher uses that filename to configure the CA certificate. - -``` -kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem -``` - -> **Note:** The configured `tls-ca` secret is retrieved when Rancher starts. On a running Rancher installation the updated CA will take effect after new Rancher pods are started. diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/tls-settings/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/tls-settings/_index.md deleted file mode 100644 index fc57ede4a78..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/tls-settings/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: TLS Settings -weight: 3 ---- - -In Rancher v2.1.7, the default TLS configuration changed to only accept TLS 1.2 and secure TLS cipher suites. TLS 1.3 and TLS 1.3 exclusive cipher suites are not supported. - -## Configuring TLS settings - -TLS settings are configured by passing environment variables to the Rancher server container. See the following to configure these settings on your installation. 
- -- [Installing Rancher on a single node with Docker]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#tls-settings) - -- [Installing Rancher on Kubernetes]({{}}/rancher/v2.x/en/installation/options/chart-options/#tls-settings) - -## TLS settings - -| Parameter | Description | Default | Available options | -|-----|-----|-----|-----| -| `CATTLE_TLS_MIN_VERSION` | Minimum TLS version | `1.2` | `1.0`, `1.1`, `1.2` | -| `CATTLE_TLS_CIPHERS` | Allowed TLS cipher suites | `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,`
`TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` | See [Golang tls constants](https://golang.org/pkg/crypto/tls/#pkg-constants) | - - -## Legacy configuration - -If you need to configure TLS the same way as it was before Rancher v2.1.7, please use the following settings: - - -| Parameter | Legacy value | -|-----|-----| -| `CATTLE_TLS_MIN_VERSION` | `1.0` | -| `CATTLE_TLS_CIPHERS` | `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,`
`TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,`
`TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,`
`TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,`
`TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,`
`TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,`
`TLS_RSA_WITH_AES_128_GCM_SHA256,`
`TLS_RSA_WITH_AES_256_GCM_SHA384,`
`TLS_RSA_WITH_AES_128_CBC_SHA,`
`TLS_RSA_WITH_AES_256_CBC_SHA,`
`TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,`
`TLS_RSA_WITH_3DES_EDE_CBC_SHA` diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/upgrading-cert-manager/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/upgrading-cert-manager/_index.md deleted file mode 100644 index 19ee7d06ee9..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/encryption/upgrading-cert-manager/_index.md +++ /dev/null @@ -1,234 +0,0 @@ ---- -title: Upgrading Cert-Manager -weight: 4 ---- - -Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: - -1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) -1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). This change has no exact deadline. -1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) - -To address these changes, this guide will do two things: - -1. Document the procedure for upgrading cert-manager -1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data - -> **Important:** -> If you are currently running the cert-manger whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: - -> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server -> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager -> 3. 
Install the newer version of Rancher and cert-manager - -> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. - -> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.x/en/upgrades/upgrades/ha/#c-upgrade-rancher) under the upgrade Rancher section. - -## Upgrade Cert-Manager - -The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. - -> These instructions have been updated for Helm 3. If you are still using Helm 2, refer to [these instructions.]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions) - -In order to upgrade cert-manager, follow these instructions: - -{{% accordion id="normal" label="Upgrading cert-manager with Internet access" %}} -1. [Back up existing resources](https://cert-manager.io/docs/tutorials/backup/) as a precaution - - ```plain - kubectl get -o yaml --all-namespaces \ - issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml - ``` - - > **Important:** - > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. 
For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) - -1. [Uninstall existing deployment](https://cert-manager.io/docs/installation/uninstall/kubernetes/#uninstalling-with-helm) - - ```plain - helm uninstall cert-manager - ``` - - Delete the CustomResourceDefinition using the link to the version vX.Y you installed - - ```plain - kubectl delete -f https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - ``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Create the namespace for cert-manager if needed - - ```plain - kubectl create namespace cert-manager - ``` - -1. Add the Jetstack Helm repository - - ```plain - helm repo add jetstack https://charts.jetstack.io - ``` - -1. Update your local Helm chart repository cache - - ```plain - helm repo update - ``` - -1. Install the new version of cert-manager - - ```plain - helm install \ - cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --version v0.12.0 - ``` - -1. 
[Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) - - ```plain - kubectl apply -f cert-manager-backup.yaml - ``` - -{{% /accordion %}} - -{{% accordion id="airgap" label="Upgrading cert-manager in an airgapped environment" %}} -### Prerequisites - -Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. - -1. Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. - -1. From a system connected to the internet, add the cert-manager repo to Helm - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v0.12.0 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - - The Helm 3 command is as follows: - - ```plain - helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ - --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - - The Helm 2 command is as follows: - - ```plain - helm template ./cert-manager-v0.12.0.tgz --output-dir . 
\ - --name cert-manager --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager (old and new) - - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml - ``` - -### Install cert-manager - -1. Back up existing resources as a precaution - - ```plain - kubectl get -o yaml --all-namespaces \ - issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml - ``` - - > **Important:** - > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) - -1. Delete the existing cert-manager installation - - ```plain - kubectl -n cert-manager \ - delete deployment,sa,clusterrole,clusterrolebinding \ - -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' - ``` - - Delete the CustomResourceDefinition using the link to the version vX.Y you installed - - ```plain - kubectl delete -f cert-manager/cert-manager-crd-old.yaml - ``` - -1. 
Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f cert-manager/cert-manager-crd.yaml - ``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Create the namespace for cert-manager - - ```plain - kubectl create namespace cert-manager - ``` - -1. Install cert-manager - - ```plain - kubectl -n cert-manager apply -R -f ./cert-manager - ``` - -1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) - - ```plain - kubectl apply -f cert-manager-backup.yaml - ``` - -{{% /accordion %}} - - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-5c6866597-zw7kh 1/1 Running 0 2m -cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m -cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m -``` - -## Cert-Manager API change and data migration - -Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. - -Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. 
- -Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be cert-manager.io instead of certmanager.k8s.io. - -We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). - -More info about [cert-manager upgrade information](https://cert-manager.io/docs/installation/upgrading/). - diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/_index.md deleted file mode 100644 index a85228adea1..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "Don't have a Kubernetes cluster? Try one of these tutorials." -weight: 4 ---- - -> This section is under construction. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/ha-RKE/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/ha-RKE/_index.md deleted file mode 100644 index c644050196b..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/ha-RKE/_index.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -title: Setting up a High-availability RKE Kubernetes Cluster -shortTitle: Set up RKE Kubernetes -weight: 3 ---- - -> This page is under construction. - -This section describes how to install a Kubernetes cluster according to our [best practices for the Rancher server environment.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. - -For Rancher prior to v2.4, Rancher should be installed on an RKE Kubernetes cluster. RKE is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. - -As of Rancher v2.4, the Rancher management server can be installed on either an RKE cluster or a K3s Kubernetes cluster. K3s is also a fully certified Kubernetes distribution released by Rancher, but is newer than RKE. We recommend installing Rancher on K3s because K3s is easier to use, and more lightweight, with a binary size of less than 100 MB. Note: After Rancher is installed on an RKE cluster, there is no migration path to a K3s setup at this time. - -The Rancher management server can only be run on Kubernetes cluster in an infrastructure provider where Kubernetes is installed using RKE or K3s. Use of Rancher on hosted Kubernetes providers, such as EKS, is not supported. 
- -For systems without direct internet access, refer to [Air Gap: Kubernetes install.]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/) - -> **Single-node Installation Tip:** -> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. -> -> To set up a single-node RKE cluster, configure only one node in the `cluster.yml` . The single node should have all three roles: `etcd`, `controlplane`, and `worker`. -> -> In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. - -# Installing Kubernetes - -### Required CLI Tools - -Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. - -Also install [RKE,]({{}}/rke/latest/en/installation/) the Rancher Kubernetes Engine, a Kubernetes distribution and command-line tool. - -### 1. Create the cluster configuration file - -In this section, you will create a Kubernetes cluster configuration file called `rancher-cluster.yml`. In a later step, when you set up the cluster with an RKE command, it will use this file to install Kubernetes on your nodes. - -Using the sample below as a guide, create the `rancher-cluster.yml` file. Replace the IP addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. - -If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. 
- -RKE will need to connect to each node over SSH, and it will look for a private key in the default location of `~/.ssh/id_rsa`. If your private key for a certain node is in a different location than the default, you will also need to configure the `ssh_key_path` option for that node. - -```yaml -nodes: - - address: 165.227.114.63 - internal_address: 172.16.22.12 - user: ubuntu - role: [controlplane, worker, etcd] - - address: 165.227.116.167 - internal_address: 172.16.32.37 - user: ubuntu - role: [controlplane, worker, etcd] - - address: 165.227.127.226 - internal_address: 172.16.42.73 - user: ubuntu - role: [controlplane, worker, etcd] - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -# Required for external TLS termination with -# ingress-nginx v0.22+ -ingress: - provider: nginx - options: - use-forwarded-headers: "true" -``` - -
Common RKE Nodes Options
- -| Option | Required | Description | -| ------------------ | -------- | -------------------------------------------------------------------------------------- | -| `address` | yes | The public DNS or IP address | -| `user` | yes | A user that can run docker commands | -| `role` | yes | List of Kubernetes roles assigned to the node | -| `internal_address` | no | The private DNS or IP address for internal cluster traffic | -| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | - -> **Advanced Configurations:** RKE has many configuration options for customizing the install to suit your specific environment. -> -> Please see the [RKE Documentation]({{}}/rke/latest/en/config-options/) for the full list of options and capabilities. -> -> For tuning your etcd cluster for larger Rancher installations, see the [etcd settings guide]({{}}/rancher/v2.x/en/installation/options/etcd/). - -### 2. Run RKE - -``` -rke up --config ./rancher-cluster.yml -``` - -When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. - -### 3. Test Your Cluster - -This section describes how to set up your workspace so that you can interact with this cluster using the `kubectl` command-line tool. - -Assuming you have installed `kubectl`, you need to place the `kubeconfig` file in a location where `kubectl` can reach it. The `kubeconfig` file contains the credentials necessary to access your cluster with `kubectl`. - -When you ran `rke up`, RKE should have created a `kubeconfig` file named `kube_config_rancher-cluster.yml`. This file has the credentials for `kubectl` and `helm`. - -> **Note:** If you have used a different file name from `rancher-cluster.yml`, then the kube config file will be named `kube_config_.yml`. 
- -Move this file to `$HOME/.kube/config`, or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_rancher-cluster.yml`: - -``` -export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml -``` - -Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state: - -``` -kubectl get nodes - -NAME STATUS ROLES AGE VERSION -165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5 -``` - -### 4. Check the Health of Your Cluster Pods - -Check that all the required pods and containers are healthy and are ready to continue. - -- Pods are in `Running` or `Completed` state. -- `READY` column shows all the containers are running (i.e. `3/3`) for pods with `STATUS` `Running` -- Pods with `STATUS` `Completed` are run-once Jobs. For these pods `READY` should be `0/1`. - -``` -kubectl get pods --all-namespaces - -NAMESPACE NAME READY STATUS RESTARTS AGE -ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s -kube-system canal-jp4hz 3/3 Running 0 30s -kube-system canal-z2hg8 3/3 Running 0 30s -kube-system canal-z6kpw 3/3 Running 0 30s -kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s -kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s -kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s -kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s -kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s -kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s -kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s -``` - -This confirms that you have successfully installed a Kubernetes cluster that the Rancher server will run on. - -### 5. 
Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) page. - -### [Next: Install Rancher]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/) \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/ha-RKE2/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/ha-RKE2/_index.md deleted file mode 100644 index 761f01d1c76..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/ha-RKE2/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Setting up a High-availability RKE2 Kubernetes Cluster -shortTitle: Set up RKE2 Kubernetes -weight: 4 ---- - -> This page is under construction \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/how-ha-works/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/how-ha-works/_index.md deleted file mode 100644 index 9aaf3bb236d..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/how-ha-works/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: About High-availability Installations -weight: 1 ---- - -We recommend using [Helm,]({{}}/rancher/v2.x/en/overview/architecture/concepts/#about-helm) a Kubernetes package manager, to install Rancher on a dedicated Kubernetes cluster. This is called a high-availability Kubernetes installation because increased availability is achieved by running Rancher on multiple nodes. 
- -In a standard installation, Kubernetes is first installed on three nodes that are hosted in an infrastructure provider such as Amazon's EC2 or Google Compute Engine. - -Then Helm is used to install Rancher on top of the Kubernetes cluster. Helm uses Rancher's Helm chart to install a replica of Rancher on each of the three nodes in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster, in order to increase Rancher's availability. - -The Rancher server data is stored on etcd. This etcd database also runs on all three nodes, and requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can fail, requiring the cluster to be restored from backup. - -For information on how Rancher works, regardless of the installation method, refer to the [architecture section.]({{}}/rancher/v2.x/en/overview/architecture) - -### Recommended Architecture - -- DNS for Rancher should resolve to a layer 4 load balancer -- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers
-![High-availability Kubernetes Installation of Rancher]({{}}/img/rancher/ha/rancher2ha.svg) -Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/_index.md deleted file mode 100644 index c24bffb45ad..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. -shortTitle: Infrastructure Tutorials -weight: 5 ---- - -The K3s documentation has: - -- Instructions for [setting up infrastructure for a high-availability K3s Kubernetes cluster with an external DB]({{}}/k3s/latest/en/installation/tutorials/ha-with-external-db) -- Instructions for [setting up a high-availability K3s Kubernetes cluster with an external DB for a Rancher server]({{}}/k3s/latest/en/installation/tutorials/ha-with-external-db) - -The RKE documentation has: - -- Instructions for [setting up infrastructure for a high-availability RKE Kubernetes cluster]({{}}/) -- Instructions for [setting up a high-availability RKE cluster]() \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md deleted file mode 100644 index ecf063ca94e..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Setting up Nodes in Amazon EC2 -weight: 3 ---- - -In this tutorial, you will learn one 
way to set up Linux nodes for the Rancher management server. These nodes will fulfill the node requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.x/en/installation/requirements/) - -If the Rancher server will be installed on an RKE Kubernetes cluster, you should provision three instances. - -If the Rancher server will be installed on a K3s Kubernetes cluster, you only need to provision two instances. - -If the Rancher server is installed in a single Docker container, you only need one instance. - -### 1. Optional Preparation - -- **Create IAM role:** To allow Rancher to manipulate AWS resources, such as provisioning new storage or new nodes, you will need to configure Amazon as a cloud provider. There are several things you'll need to do to set up the cloud provider on EC2, but part of this process is setting up an IAM role for the Rancher server nodes. For the full details on setting up the cloud provider, refer to this [page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) -- **Create security group:** We also recommend setting up a security group for the Rancher nodes that complies with the [port requirements for Rancher nodes.]({{}}/rancher/v2.x/en/installation/requirements/#port-requirements) - -### 2. Provision Instances - -1. Log into the [Amazon AWS EC2 Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to take note of the **Region** where your EC2 instances (Linux nodes) are created, because all of the infrastructure for the Rancher management server should be in the same region. -1. In the left panel, click **Instances.** -1. Click **Launch Instance.** -1. In the section called **Step 1: Choose an Amazon Machine Image (AMI),** we will use Ubuntu 18.04 as the Linux OS, using `ami-0d1cd67c26f5fca19 (64-bit x86)`. Go to the Ubuntu AMI and click **Select.** -1. In the **Step 2: Choose an Instance Type** section, select the `t2.medium` type. -1. 
Click **Next: Configure Instance Details.** -1. In the **Number of instances** field, enter the number of instances. A high-availability K3s cluster requires only two instances, while a high-availability RKE cluster requires three instances. -1. Optional: If you created an IAM role for Rancher to manipulate AWS resources, select the new IAM role in the **IAM role** field. -1. Click **Next: Add Storage,** **Next: Add Tags,** and **Next: Configure Security Group.** -1. In **Step 6: Configure Security Group,** select a security group that complies with the [port requirements]({{}}/rancher/v2.x/en/installation/requirements/#port-requirements) for Rancher nodes. -1. Click **Review and Launch.** -1. Click **Launch.** -1. Choose a new or existing key pair that you will use to connect to your instance later. If you are using an existing key pair, make sure you already have access to the private key. -1. Click **Launch Instances.** - -**Result:** You have created Rancher nodes that satisfy the requirements for OS, hardware, and networking. Next, you will install Docker on each node. - -### 3. Install Docker and Create User - -1. From the [AWS EC2 console,](https://console.aws.amazon.com/ec2/) click **Instances** in the left panel. -1. Go to the instance that you want to install Docker on. Select the instance and click **Actions > Connect.** -1. Connect to the instance by following the instructions on the screen that appears. Copy the Public DNS of the instance. An example command to SSH into the instance is as follows: -``` -sudo ssh -i [path-to-private-key] ubuntu@[public-DNS-of-instance] -``` -1. When you are connected to the instance, run the following command on the instance to create a user: -``` -sudo usermod -aG docker ubuntu -``` -1. Run the following command on the instance to install Docker with one of Rancher's installation scripts: -``` -curl https://releases.rancher.com/install-docker/18.09.sh | sh -``` -1. 
Repeat these steps so that Docker is installed on each node that will eventually run the Rancher management server. - -> To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher’s Docker installation scripts. - -**Result:** You have set up Rancher server nodes that fulfill all the node requirements for OS, Docker, hardware and networking. - -### Next Steps for RKE Kubernetes Cluster Nodes - -If you are going to install an RKE cluster on the new nodes, take note of the **IPv4 Public IP** and **Private IP** of each node. This information can be found on the **Description** tab for each node after it is created. The public and private IP will be used to populate the `address` and `internal_address` of each node in the RKE cluster configuration file, `rancher-cluster.yml`. - -RKE will also need access to the private key to connect to each node. Therefore, you might want to take note of the path to your private keys to connect to the nodes, which can also be included in the `rancher-cluster.yml` under the `ssh_key_path` directive for each node. \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md deleted file mode 100644 index fde4297dfae..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Setting up an NGINX Load Balancer -weight: 4 ---- - -NGINX will be configured as Layer 4 load balancer (TCP) that forwards connections to one of your Rancher nodes. - -In this configuration, the load balancer is positioned in front of your nodes. 
The load balancer can be any host capable of running NGINX. - -One caveat: do not use one of your Rancher nodes as the load balancer. - -> These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. - -## Install NGINX - -Start by installing NGINX on the node you want to use as a load balancer. NGINX has packages available for all known operating systems. The versions tested are `1.14` and `1.15`. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). - -The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation on how to install and enable the NGINX `stream` module on your operating system. - -## Create NGINX Configuration - -After installing NGINX, you need to update the NGINX configuration file, `nginx.conf`, with the IP addresses for your nodes. - -1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. - -2. From `nginx.conf`, replace both occurrences (port 80 and port 443) of ``, ``, and `` with the IPs of your [nodes]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/). - - > **Note:** See [NGINX Documentation: TCP and UDP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/) for all configuration options. - -
Example NGINX config
- ``` - worker_processes 4; - worker_rlimit_nofile 40000; - - events { - worker_connections 8192; - } - - stream { - upstream rancher_servers_http { - least_conn; - server :80 max_fails=3 fail_timeout=5s; - server :80 max_fails=3 fail_timeout=5s; - server :80 max_fails=3 fail_timeout=5s; - } - server { - listen 80; - proxy_pass rancher_servers_http; - } - - upstream rancher_servers_https { - least_conn; - server :443 max_fails=3 fail_timeout=5s; - server :443 max_fails=3 fail_timeout=5s; - server :443 max_fails=3 fail_timeout=5s; - } - server { - listen 443; - proxy_pass rancher_servers_https; - } - - } - ``` - - -3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. - -4. Load the updates to your NGINX configuration by running the following command: - - ``` - # nginx -s reload - ``` - -## Option - Run NGINX as Docker container - -Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/nginx.conf:/etc/nginx/nginx.conf \ - nginx:1.14 -``` diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md deleted file mode 100644 index 93dabfc5113..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Setting up Amazon ELB Network Load Balancer -weight: 5 ---- - -This how-to guide describes how to set up a Network Load Balancer (NLB) in Amazon's EC2 service that will direct traffic to multiple instances on EC2. 
- -These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. - -This tutorial is about one possible way to set up your load balancer, not the only way. Other types of load balancers, such as a Classic Load Balancer or Application Load Balancer, could also direct traffic to the Rancher server nodes. - -Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ALB. - -# Setting up the Load Balancer - -Configuring an Amazon NLB is a multistage process: - -1. [Create Target Groups](#1-create-target-groups) -2. [Register Targets](#2-register-targets) -3. [Create Your NLB](#3-create-your-nlb) -4. [Add listener to NLB for TCP port 80](#4-add-listener-to-nlb-for-tcp-port-80) - -# Requirements - -These instructions assume you have already created Linux instances in EC2. The load balancer will direct traffic to these nodes. - -# 1. Create Target Groups - -Begin by creating two target groups for the **TCP** protocol, one with TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. - -Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but it's convenient to add a listener for port 80, because traffic to port 80 will be automatically redirected to port 443. - -Regardless of whether an NGINX Ingress or Traefik Ingress controller is used, the Ingress should redirect traffic from port 80 to port 443. - -1. 
Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. -1. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. -1. Click **Create target group** to create the first target group, regarding TCP port 443. - -> **Note:** Health checks are handled differently based on the Ingress. For details, refer to [this section.](#health-check-paths-for-nginx-ingress-and-traefik-ingresses) - -### Target Group (TCP port 443) - -Configure the first target group according to the table below. - -| Option | Setting | -|-------------------|-------------------| -| Target Group Name | `rancher-tcp-443` | -| Target type | `instance` | -| Protocol | `TCP` | -| Port | `443` | -| VPC | Choose your VPC | - -Health check settings: - -| Option | Setting | -|---------------------|-----------------| -| Protocol | TCP | -| Port | `override`,`80` | -| Healthy threshold | `3` | -| Unhealthy threshold | `3` | -| Timeout | `6 seconds` | -| Interval | `10 seconds` | - -Click **Create target group** to create the second target group, regarding TCP port 80. - -### Target Group (TCP port 80) - -Configure the second target group according to the table below. - -| Option | Setting | -|-------------------|------------------| -| Target Group Name | `rancher-tcp-80` | -| Target type | `instance` | -| Protocol | `TCP` | -| Port | `80` | -| VPC | Choose your VPC | - - -Health check settings: - -| Option |Setting | -|---------------------|----------------| -| Protocol | TCP | -| Port | `traffic port` | -| Healthy threshold | `3` | -| Unhealthy threshold | `3` | -| Timeout | `6 seconds` | -| Interval | `10 seconds` | - -# 2. Register Targets - -Next, add your Linux nodes to both target groups. - -Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. 
- -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} - -Select the instances (Linux nodes) you want to add, and click **Add to registered**. - -
-**Screenshot Add targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} - -
-**Screenshot Added targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} - -When the instances are added, click **Save** on the bottom right of the screen. - -Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. - -# 3. Create Your NLB - -Use Amazon's Wizard to create a Network Load Balancer. As part of this process, you'll add the target groups you created in [1. Create Target Groups](#1-create-target-groups). - -1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). - -2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. - -3. Click **Create Load Balancer**. - -4. Choose **Network Load Balancer** and click **Create**. Then complete each form. - -- [Step 1: Configure Load Balancer](#step-1-configure-load-balancer) -- [Step 2: Configure Routing](#step-2-configure-routing) -- [Step 3: Register Targets](#step-3-register-targets) -- [Step 4: Review](#step-4-review) - -### Step 1: Configure Load Balancer - -Set the following fields in the form: - -- **Name:** `rancher` -- **Scheme:** `internal` or `internet-facing`. The scheme that you choose for your NLB is dependent on the configuration of your instances and VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. -- **Listeners:** The Load Balancer Protocol should be `TCP` and the corresponding Load Balancer Port should be set to `443`. -- **Availability Zones:** Select Your **VPC** and **Availability Zones**. - -### Step 2: Configure Routing - -1. From the **Target Group** drop-down, choose **Existing target group**. -1. From the **Name** drop-down, choose `rancher-tcp-443`. -1. Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. 
- -### Step 3: Register Targets - -Since you registered your targets earlier, all you have to do is click **Next: Review**. - -### Step 4: Review - -Look over the load balancer details and click **Create** when you're satisfied. - -After AWS creates the NLB, click **Close**. - -# 4. Add listener to NLB for TCP port 80 - -1. Select your newly created NLB and select the **Listeners** tab. - -2. Click **Add listener**. - -3. Use `TCP`:`80` as **Protocol** : **Port** - -4. Click **Add action** and choose **Forward to...** - -5. From the **Forward to** drop-down, choose `rancher-tcp-80`. - -6. Click **Save** in the top right of the screen. - -# Health Check Paths for NGINX Ingress and Traefik Ingresses - -K3s and RKE Kubernetes clusters handle health checks differently because they use different Ingresses by default. - -For RKE Kubernetes clusters, NGINX Ingress is used by default, whereas for K3s Kubernetes clusters, Traefik is the default Ingress. - -- **Traefik:** The health check path is `/ping`. By default `/ping` is always matched (regardless of Host), and a response from [Traefik itself](https://docs.traefik.io/operations/ping/) is always served. -- **NGINX Ingress:** The default backend of the NGINX Ingress controller has a `/healthz` endpoint. By default `/healthz` is always matched (regardless of Host), and a response from [`ingress-nginx` itself](https://github.com/kubernetes/ingress-nginx/blob/0cbe783f43a9313c9c26136e888324b1ee91a72f/charts/ingress-nginx/values.yaml#L212) is always served. - -To simulate an accurate health check, it is a best practice to use the Host header (Rancher hostname) combined with `/ping` or `/healthz` (for K3s or for RKE clusters, respectively) wherever possible, to get a response from the Rancher Pods, not the Ingress. 
diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md deleted file mode 100644 index f40b9f96b59..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Setting up a MySQL Database in Amazon RDS -weight: 4 ---- -This tutorial describes how to set up a MySQL database in Amazon's RDS. - -This database can later be used as an external datastore for a high-availability K3s Kubernetes cluster. - -1. Log into the [Amazon AWS RDS Console](https://console.aws.amazon.com/rds/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. -1. In the left panel, click **Databases.** -1. Click **Create database.** -1. In the **Engine type** section, click **MySQL.** -1. In the **Version** section, choose **MySQL 5.7.22.** -1. In **Settings** section, under **Credentials Settings,** enter a master password for the **admin** master username. Confirm the password. -1. Expand the **Additional configuration** section. In the **Initial database name** field, enter a name. The name can have only letters, numbers, and underscores. This name will be used to connect to the database. -1. Click **Create database.** - -You'll need to capture the following information about the new database so that the K3s Kubernetes cluster can connect to it. - -To see this information in the Amazon RDS console, click **Databases,** and click the name of the database that you created. - -- **Username:** Use the admin username. -- **Password:** Use the admin password. -- **Hostname:** Use the **Endpoint** as the hostname. The endpoint is available in the **Connectivity & security** section. -- **Port:** The port should be 3306 by default. 
You can confirm it in the **Connectivity & security** section. -- **Database name:** Confirm the name by going to the **Configuration** tab. The name is listed under **DB name.** - -This information will be used to connect to the database in the following format: - -``` -mysql://username:password@tcp(hostname:3306)/database-name -``` - -For more information on configuring the datastore for K3s, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/_index.md deleted file mode 100644 index 09b8bca60bd..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/_index.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Installing Rancher on a Single Node Using Docker -description: For development and testing environments only, use a Docker install. Install Docker on a single Linux host, and deploy Rancher with a single Docker container. -weight: 1 ---- - -> This section is under construction. - -For development and testing environments only, Rancher can be installed by running a single Docker container. - -In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. - -> **Want to use an external load balancer?** -> See [Docker Install with an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/single-node-install-external-lb) instead. - -# Requirements for OS, Docker, Hardware, and Networking - -Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) - -# 1. Provision Linux Host - -Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements) to launch your Rancher server. 
- -# 2. Choose an SSL Option and Install Rancher - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -> **Do you want to...** -> -> - Use a proxy? See [HTTP Proxy Configuration]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/) -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate/) -> - Complete an Air Gap Installation? See [Air Gap: Docker Install]({{}}/rancher/v2.x/en/installation/air-gap-single-node/) -> - Record all transactions with the Rancher API? See [API Auditing](#api-audit-log) - -Choose from the following options: - -### Option A: Default Rancher-generated Self-signed Certificate - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the minimum installation command below. - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest -``` - -### Option B: Bring Your Own Certificate, Self-signed -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - -> **Prerequisites:** -> Create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. -> -> - The certificate files must be in [PEM format](#pem). -> - In your certificate file, include all intermediate certificates in the chain. 
Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting](#cert-order). - -After creating your certificate, run the Docker command below to install Rancher. Use the `-v` flag and provide the path to your certificates to mount them in your container. - -| Placeholder | Description | -| ------------------- | --------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | The path to the certificate authority's certificate. | - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - rancher/rancher:latest -``` - -### Option C: Bring Your Own Certificate, Signed by a Recognized CA - -In production environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - -> **Prerequisites:** -> -> - The certificate files must be in [PEM format](#pem). -> - In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting](#cert-order). - -After obtaining your certificate, run the Docker command below. - -- Use the `-v` flag and provide the path to your certificates to mount them in your container. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. -- Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. - -| Placeholder | Description | -| ------------------- | ----------------------------- | -| `` | The path to the directory containing your certificate files. 
| -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - rancher/rancher:latest \ - --no-cacerts -``` - -### Option D: Let's Encrypt Certificate - -> **Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). - -For production environments, you also have the option of using [Let's Encrypt](https://letsencrypt.org/) certificates. Let's Encrypt uses an http-01 challenge to verify that you have control over your domain. You can confirm that you control the domain by pointing the hostname that you want to use for Rancher access (for example, `rancher.mydomain.com`) to the IP of the machine it is running on. You can bind the hostname to the IP address by creating an A record in DNS. - -> **Prerequisites:** -> -> - Let's Encrypt is an Internet service. Therefore, this option cannot be used in an internal/air gapped network. -> - Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). -> - Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. - -After you fulfill the prerequisites, you can install Rancher using a Let's Encrypt certificate by running the following command. 
- -| Placeholder | Description | -| ----------------- | ------------------- | -| `` | Your domain address | - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest \ - --acme-domain -``` - -## Advanced Options - -When installing Rancher on a single node with Docker, there are several advanced options that can be enabled: - -- Custom CA Certificate -- API Audit Log -- TLS Settings -- Air Gap -- Persistent Data -- Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node - -Refer to [this page](./advanced) for details. - -## Troubleshooting - -Refer to [this page](./troubleshooting) for frequently asked questions and troubleshooting tips. - -## What's Next? - -- **Recommended:** Review [Single Node Backup and Restoration]({{}}/rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). 
diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/advanced/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/advanced/_index.md deleted file mode 100644 index 4330a41fe01..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/advanced/_index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Advanced Options for Docker Installs -weight: 2 ---- - -When installing Rancher, there are several [advanced options]({{}}/rancher/v2.x/en/installation/options/) that can be enabled: - -- [Custom CA Certificate](#custom-ca-certificate) -- [API Audit Log](#api-audit-log) -- [TLS Settings](#tls-settings) -- [Air Gap](#air-gap) -- [Persistent Data](#persistent-data) -- [Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node](#running-rancher-rancher-and-rancher-rancher-agent-on-the-same-node) - -### Custom CA Certificate - -If you want to configure Rancher to use a CA root certificate to be used when validating services, you would start the Rancher container sharing the directory that contains the CA root certificate. - -Use the command example to start a Rancher container with your private CA certificates mounted. - -- The volume flag (`-v`) should specify the host directory containing the CA root certificates. -- The environment variable flag (`-e`) in combination with `SSL_CERT_DIR` and directory declares an environment variable that specifies the mounted CA root certificates directory location inside the container. -- Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. -- Mounting a host directory inside the container can be done using `-v host-source-directory:container-destination-directory` or `--volume host-source-directory:container-destination-directory`. 
- -The example below is based on having the CA root certificates in the `/host/certs` directory on the host and mounting this directory on `/container/certs` inside the Rancher container. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /host/certs:/container/certs \ - -e SSL_CERT_DIR="/container/certs" \ - rancher/rancher:latest -``` - -### API Audit Log - -The API Audit Log records all the user and system transactions made through Rancher server. - -The API Audit Log writes to `/var/log/auditlog` inside the rancher container by default. Share that directory as a volume and set your `AUDIT_LEVEL` to enable the log. - -See [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing) for more information and options. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /var/log/rancher/auditlog:/var/log/auditlog \ - -e AUDIT_LEVEL=1 \ - rancher/rancher:latest -``` - -### TLS settings - -To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_TLS_MIN_VERSION="1.0" \ - rancher/rancher:latest -``` - -See [TLS settings]({{}}/rancher/v2.x/en/admin-settings/tls-settings) for more information and options. - -### Air Gap - -If you are visiting this page to complete an air gap installation, you must prepend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. 
- -**Example:** - - /rancher/rancher:latest - -### Persistent Data - -{{< persistentdata >}} - -### Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node - -In the situation where you want to use a single node to run Rancher and to be able to add the same node to a cluster, you have to adjust the host ports mapped for the `rancher/rancher` container. - -If a node is added to a cluster, it deploys the nginx ingress controller which will use port 80 and 443. This will conflict with the default ports we advise to expose for the `rancher/rancher` container. - -Please note that this setup is not recommended for production use, but can be convenient for development/demo purposes. - -To change the host ports mapping, replace the following part `-p 80:80 -p 443:443` with `-p 8080:80 -p 8443:443`: - -``` -docker run -d --restart=unless-stopped \ - -p 8080:80 -p 8443:443 \ - rancher/rancher:latest -``` diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/installing-docker/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/installing-docker/_index.md deleted file mode 100644 index ac20da0afe8..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/installing-docker/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Installing Docker -weight: 1 ---- - -Docker is required to be installed on any node that runs the Rancher server. - -There are a couple of options for installing Docker. One option is to refer to the [official Docker documentation](https://docs.docker.com/install/) about how to install Docker on Linux. The steps will vary based on the Linux distribution. - -Another option is to use one of Rancher's Docker installation scripts, which are available for most recent versions of Docker. 
- -For example, this command could be used to install Docker 18.09 on Ubuntu: - -``` -curl https://releases.rancher.com/install-docker/18.09.sh | sh -``` - -To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher's Docker installation scripts. \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/proxy/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/proxy/_index.md deleted file mode 100644 index 62445cbc6e1..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/proxy/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: HTTP Proxy Configuration for Docker Installs -weight: 3 ---- - -If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. - -Make sure `NO_PROXY` contains the network addresses, network address ranges and domains that should be excluded from using the proxy. - -| Environment variable | Purpose | -| -------------------- | ----------------------------------------------------------------------------------------------------------------------- | -| HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) | -| HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) | -| NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) | - -> **Note** NO_PROXY must be in uppercase to use network range (CIDR) notation. - -## Docker Installation - -Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. 
Required values for `NO_PROXY` in a [Docker Installation]({{}}/rancher/v2.x/en/installation/single-node-install/) are: - -- `localhost` -- `127.0.0.1` -- `0.0.0.0` -- `10.0.0.0/8` - -The example below is based on a proxy server accessible at `http://192.168.0.1:3128`, and excluding usage the proxy when accessing network range `192.168.10.0/24` and every hostname under the domain `example.com`. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e HTTP_PROXY="http://192.168.10.1:3128" \ - -e HTTPS_PROXY="http://192.168.10.1:3128" \ - -e NO_PROXY="localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,192.168.10.0/24,example.com" \ - rancher/rancher:latest -``` diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/single-node-install-external-lb/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/single-node-install-external-lb/_index.md deleted file mode 100644 index d02f30e551e..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/install/resources/single-node-docker/single-node-install-external-lb/_index.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer -weight: 4 ---- - -For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work with it conjunction. - -A layer-7 load balancer can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. - -This install procedure walks you through deployment of Rancher using a single container, and then provides a sample configuration for a layer-7 NGINX load balancer. 
- -> **Want to skip the external load balancer?** -> See [Docker Installation]({{}}/rancher/v2.x/en/installation/single-node) instead. - -## Requirements for OS, Docker, Hardware, and Networking - -Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) - -## Installation Outline - - - -- [1. Provision Linux Host](#1-provision-linux-host) -- [2. Choose an SSL Option and Install Rancher](#2-choose-an-ssl-option-and-install-rancher) -- [3. Configure Load Balancer](#3-configure-load-balancer) - - - -## 1. Provision Linux Host - -Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements) to launch your {{< product >}} Server. - -## 2. Choose an SSL Option and Install Rancher - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -> **Do you want to...** -> -> - Complete an Air Gap Installation? -> - Record all transactions with the Rancher API? -> -> See [Advanced Options](#advanced-options) below before continuing. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A-Bring Your Own Certificate: Self-Signed" %}} -If you elect to use a self-signed certificate to encrypt communication, you must install the certificate on your load balancer (which you'll do later) and your Rancher container. Run the Docker command to deploy Rancher, pointing it toward your certificate. - -> **Prerequisites:** -> Create a self-signed certificate. -> -> - The certificate files must be in [PEM format](#pem). - -**To Install Rancher Using a Self-Signed Cert:** - -1. While running the Docker command to deploy Rancher, point Docker toward your CA certificate file. 
- - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \ - rancher/rancher:latest - ``` - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Signed by Recognized CA" %}} -If your cluster is public facing, it's best to use a certificate signed by a recognized CA. - -> **Prerequisites:** -> -> - The certificate files must be in [PEM format](#pem). - -**To Install Rancher Using a Cert Signed by a Recognized CA:** - -If you use a certificate signed by a recognized CA, installing your certificate in the Rancher container isn't necessary. We do have to make sure there is no default CA certificate generated and stored, you can do this by passing the `--no-cacerts` parameter to the container. - -1. Enter the following command. - - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest --no-cacerts - ``` - - {{% /accordion %}} - -## 3. Configure Load Balancer - -When using a load balancer in front of your Rancher container, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https` header, this redirect is disabled. - -The load balancer or proxy has to be configured to support the following: - -- **WebSocket** connections -- **SPDY** / **HTTP/2** protocols -- Passing / setting the following headers: - - | Header | Value | Description | - |--------|-------|-------------| - | `Host` | Hostname used to reach Rancher. | To identify the server requested by the client. - | `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer or proxy.

**Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. - | `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer or proxy. - | `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. -### Example NGINX configuration - -This NGINX configuration is tested on NGINX 1.14. - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). - -- Replace `rancher-server` with the IP address or hostname of the node running the Rancher container. -- Replace both occurrences of `FQDN` to the DNS name for Rancher. -- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. - -``` -worker_processes 4; -worker_rlimit_nofile 40000; - -events { - worker_connections 8192; -} - -http { - upstream rancher { - server rancher-server:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. 
- proxy_read_timeout 900s; - proxy_buffering off; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } -} -``` - -
- -## What's Next? - -- **Recommended:** Review [Single Node Backup and Restoration]({{}}/rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). - -
- -## FAQ and Troubleshooting - -{{< ssl_faq_single >}} - -## Advanced Options - -### API Auditing - -If you want to record all transactions with the Rancher API, enable the [API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing) feature by adding the flags below into your install command. - - -e AUDIT_LEVEL=1 \ - -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ - -e AUDIT_LOG_MAXAGE=20 \ - -e AUDIT_LOG_MAXBACKUP=20 \ - -e AUDIT_LOG_MAXSIZE=100 \ - -### Air Gap - -If you are visiting this page to complete an [Air Gap Installation]({{}}/rancher/v2.x/en/installation/air-gap-installation/), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. - -**Example:** - - /rancher/rancher:latest - -### Persistent Data - -{{< persistentdata >}} - -This layer 7 NGINX configuration is tested on NGINX version 1.13 (mainline) and 1.14 (stable). - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). 
- -``` -upstream rancher { - server rancher-server:80; -} - -map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; -} - -server { - listen 443 ssl http2; - server_name rancher.yourdomain.com; - ssl_certificate /etc/your_certificate_directory/fullchain.pem; - ssl_certificate_key /etc/your_certificate_directory/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } -} - -server { - listen 80; - server_name rancher.yourdomain.com; - return 301 https://$server_name$request_uri; -} -``` - -
- diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/upgrade/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/upgrade/_index.md deleted file mode 100644 index 37c71224458..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/upgrade/_index.md +++ /dev/null @@ -1,222 +0,0 @@ ---- -title: Upgrade -weight: 2 ---- - -> This page is under construction. - -The following instructions will guide you through using Helm to upgrade a Rancher server that was installed on a Kubernetes cluster. - -### Note on Upgrading the Rancher Server Kubernetes Cluster - -If Rancher is installed on an RKE Kubernetes cluster, refer to the [RKE documentation on upgrading the cluster.](https://rancher.com/docs/rke/latest/en/upgrades/) - -If Rancher is installed on a K3s Kubernetes cluster, refer to the [K3s documentation on upgrading the cluster.](https://rancher.com/docs/k3s/latest/en/upgrades/basic/) - -If Rancher is installed on another type of Kubernetes cluster, refer to the official documentation about upgrades for that Kubernetes distribution. - - -### Known Upgrade Issues - -A list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) - -### Caveats -Upgrades _to_ or _from_ any chart in the [rancher-alpha repository]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories/) aren't supported. - -### Recovering from Unsuccessful Upgrades - -If you upgrade Rancher and the upgrade does not complete successfully, you may need to roll back your Rancher Server to its last healthy state. - -Restoring a snapshot of the Rancher Server cluster will revert Rancher to the version and state at the time of the snapshot. - ->**Note:** Managed cluster are authoritative for their state. 
This means restoring the Rancher server will not revert workload deployments or changes made on managed clusters after the snapshot was taken. - -# Prerequisites - -- **Review the [known upgrade issues]({{}}/rancher/v2.x/en/upgrades/upgrades/#known-upgrade-issues) and [caveats]({{}}/rancher/v2.x/en/upgrades/upgrades/#caveats)** in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) -- **For [air gap installs only,]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. - -# Upgrade Outline - -Follow the steps to upgrade Rancher server: - -- [A. Back up your Kubernetes cluster that is running Rancher server](#a-backup-your-kubernetes-cluster-that-is-running-rancher-server) -- [B. Update the Helm chart repository](#b-update-the-helm-chart-repository) -- [C. Upgrade Rancher](#c-upgrade-rancher) -- [D. Verify the Upgrade](#d-verify-the-upgrade) - -### A. Back up Your Kubernetes Cluster that is Running Rancher Server - -[Take a one-time snapshot]({{}}/rancher/v2.x/en/backups/backups/ha-backups/#option-b-one-time-snapshots) -of your Kubernetes cluster running Rancher server. You'll use the snapshot as a restoration point if something goes wrong during upgrade. - -### B. Update the Helm chart repository - -1. Update your local helm repo cache. - - ``` - helm repo update - ``` - -1. Get the repository name that you used to install Rancher. 
- - For information about the repos and their differences, see [Helm Chart Repositories]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). - - {{< release-channel >}} - - ``` - helm repo list - - NAME URL - stable https://kubernetes-charts.storage.googleapis.com - rancher- https://releases.rancher.com/server-charts/ - ``` - - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{}}/rancher/v2.x/en/installation/options/server-tags/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. - - -1. Fetch the latest chart to install Rancher from the Helm chart repository. - - This command will pull down the latest charts and save it in the current directory as a `.tgz` file. - - ```plain - helm fetch rancher-/rancher - ``` - -### C. Upgrade Rancher - -This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. - -{{% tabs %}} -{{% tab "Kubernetes Upgrade" %}} - -Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. - -``` -helm get values rancher -n cattle-system - -hostname: rancher.my.org -``` - -> **Note:** There will be more values that are listed with this command. This is just an example of one of the values. - -If you are also upgrading cert-manager to the latest version from a version older than 0.11.0, follow `Option B: Reinstalling Rancher and cert-manager`. Otherwise, follow `Option A: Upgrading Rancher`. - -{{% accordion label="Option A: Upgrading Rancher" %}} - -Upgrade Rancher to the latest version with all your settings. 
- -Take all the values from the previous step and append them to the command using `--set key=value`: - -``` -helm upgrade rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org -``` - -> **Note:** There will be many more options from the previous step that need to be appended. - -{{% /accordion %}} - -{{% accordion label="Option B: Reinstalling Rancher and cert-manager" %}} - -If you are currently running the cert-manger whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manger due to the API change in cert-manger v0.11. - -1. Uninstall Rancher - - ``` - helm delete rancher -n cattle-system - ``` - -2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) page. - -3. Reinstall Rancher to the latest version with all your settings. Take all the values from the step 1 and append them to the command using `--set key=value`. Note: There will be many more options from the step 1 that need to be appended. - - ``` - helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org - ``` - -{{% /accordion %}} - -{{% /tab %}} - -{{% tab "Kubernetes Air Gap Upgrade" %}} - -1. Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - - Based on the choice you made during installation, complete one of the procedures below. - - Placeholder | Description - ------------|------------- - `` | The version number of the output tarball. - `` | The DNS name you pointed at your load balancer. - `` | The DNS name for your private registry. 
- `` | Cert-manager version running on k8s cluster. - -{{% accordion id="self-signed" label="Option A-Default Self-Signed Certificate" %}} - - ```plain -helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -{{% /accordion %}} -{{% accordion id="secret" label="Option B: Certificates From Files using Kubernetes Secrets" %}} - -```plain -helm template ./rancher-.tgz --output-dir . \ ---name rancher \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain -helm template ./rancher-.tgz --output-dir . \ ---name rancher \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set privateCA=true \ ---set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -{{% /accordion %}} - -2. Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. - - Use `kubectl` to apply the rendered manifests. - - ```plain - kubectl -n cattle-system apply -R -f ./rancher - ``` - -{{% /tab %}} -{{% /tabs %}} - -### D. 
Verify the Upgrade - -Log into Rancher to confirm that the upgrade succeeded. - ->**Having network issues following upgrade?** -> -> See [Restoring Cluster Networking]({{}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). - -## Rolling Back - -Should something go wrong, follow the [roll back]({{}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you preformed the upgrade. diff --git a/content/rancher/v2.5/en/install-rancher-on-k8s/upgrade/rollbacks/_index.md b/content/rancher/v2.5/en/install-rancher-on-k8s/upgrade/rollbacks/_index.md deleted file mode 100644 index 940b6fd4f89..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-k8s/upgrade/rollbacks/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Rolling Back the Rancher Helm Chart -weight: 1 ---- - -> This page is under construction. - -If you upgrade Rancher and the upgrade does not complete successfully, you may need to [restore Rancher from backup.](../../backups/restores) - -Restoring a snapshot of the Rancher Server cluster will revert Rancher to the version and state at the time of the snapshot. - ->**Note:** Managed clusters are authoritative for their state. This means restoring the rancher server will not revert workload deployments or changes made on managed clusters after the snapshot was taken. diff --git a/content/rancher/v2.5/en/install-rancher-on-linux/_index.md b/content/rancher/v2.5/en/install-rancher-on-linux/_index.md deleted file mode 100644 index a39d752685c..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-linux/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Install on a Linux OS -weight: 2 ---- - -Rancher is both a platform and a Kubernetes distribution. In this section you'll learn how to install Rancher as a Helm chart on an existing Kubernetes cluster, without installing the Rancher Kubernetes distribution. 
- -There are two main ways that Rancher can be installed: - -1. You can use Helm to install the Rancher Helm chart on any Kubernetes cluster. -2. You can use the Rancher CLI to install a Rancher Kubernetes cluster. This cluster comes with the Rancher Helm chart built in. - -The installation path that you choose will affect the way that you upgrade Rancher, but not the way that Rancher is backed up and restored. - -This section focuses on the installation path in which the Rancher CLI is used to provision a new Kubernetes cluster with the Rancher Helm chart built in. diff --git a/content/rancher/v2.5/en/install-rancher-on-linux/install/_index.md b/content/rancher/v2.5/en/install-rancher-on-linux/install/_index.md deleted file mode 100644 index 1bb080bd47f..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-linux/install/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Install -weight: 1 ---- - -> This page is under construction. - -In this section, you'll learn how to create a Rancher Kubernetes cluster that comes with the Rancher Helm chart built in. - -> To install Rancher on an existing Kubernetes cluster, refer to the instructions on [installing the Rancher Helm chart.](../../rancher-helm-install-upgrade/installation) \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/_index.md b/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/_index.md deleted file mode 100644 index ba7375d5f63..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Upgrade -weight: 2 ---- - -> This page is under construction. - -This page is about how to upgrade the local Kubernetes cluster that Rancher is installed on, as well as the Rancher Helm chart. - -This information applies if you set up the local Rancher server cluster using the Rancher CLI. 
- -To upgrade the Rancher server's local Kubernetes cluster using the Rancher CLI, you will first upgrade the master nodes of the cluster, and then the worker nodes. - -### About Rancher Kubernetes - -Rancher is intended to be installed on any Kubernetes cluster. - -The Rancher CLI comes with a Kubernetes distribution called Rancher Kubernetes, which allows you to install Kubernetes more easily as a prerequisite to installing Rancher. - -Rancher Kubernetes clusters can also be imported into Rancher. - - -### Known Upgrade Issues - -A list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) - -### Caveats -Upgrades _to_ or _from_ any chart in the [rancher-alpha repository]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories/) aren't supported. \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/cluster-upgrade/_index.md b/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/cluster-upgrade/_index.md deleted file mode 100644 index f33386702a1..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/cluster-upgrade/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Cluster Upgrade -weight: 2 ---- - -> This section is under construction. - -In this section, you'll learn how to upgrade the Rancher Helm chart and the master nodes of the Rancher Kubernetes cluster, if applicable. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/cluster-upgrade/woker-node-upgrade/_index.md b/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/cluster-upgrade/woker-node-upgrade/_index.md deleted file mode 100644 index 76f04a09ac2..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/cluster-upgrade/woker-node-upgrade/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Worker Node Upgrade -weight: 1 ---- - -> This page is under construction. - -In this section, you'll learn how to upgrade the worker nodes of the Rancher Kubernetes cluster, if applicable. \ No newline at end of file diff --git a/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/image-upgrade/_index.md b/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/image-upgrade/_index.md deleted file mode 100644 index c68c3d159e7..00000000000 --- a/content/rancher/v2.5/en/install-rancher-on-linux/upgrade/image-upgrade/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Upgrade the Rancher Image -weight: 1 ---- - -> This page is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/_index.md b/content/rancher/v2.5/en/istio/_index.md deleted file mode 100644 index 6bb8cb5b3cb..00000000000 --- a/content/rancher/v2.5/en/istio/_index.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: Istio -weight: 13 ---- - -> This section is under construction. - - [Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. - - As a network of microservices changes and grows, the interactions between them can become more difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. 
- -Our integration of Istio is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. - -This service mesh provides features that include but are not limited to the following: - -- Traffic management features -- Enhanced monitoring and tracing -- Service discovery and routing -- Secure connections and service-to-service authentication with mutual TLS -- Load balancing -- Automatic retries, backoff, and circuit breaking - -After Istio is enabled in a cluster, you can leverage Istio's control plane functionality with `kubectl`. - -Rancher's Istio integration comes with comprehensive visualization aids: - -- **Trace the root cause of errors with Jaeger.** [Jaeger](https://www.jaegertracing.io/) is an open-source tool that provides a UI for a distributed tracing system, which is useful for root cause analysis and for determining what causes poor performance. Distributed tracing allows you to view an entire chain of calls, which might originate with a user request and traverse dozens of microservices. -- **Get the full picture of your microservice architecture with Kiali.** [Kiali](https://www.kiali.io/) provides a diagram that shows the services within a service mesh and how they are connected, including the traffic rates and latencies between them. You can check the health of the service mesh, or drill down to see the incoming and outgoing requests to a single component. -- **Gain insights from time series analytics with Grafana dashboards.** [Grafana](https://grafana.com/) is an analytics platform that allows you to query, visualize, alert on and understand the data gathered by Prometheus. -- **Write custom queries for time series data with the Prometheus UI.** [Prometheus](https://prometheus.io/) is a systems monitoring and alerting toolkit. 
Prometheus scrapes data from your cluster, which is then used by Grafana. A Prometheus UI is also integrated into Rancher, and lets you write custom queries for time series data and see the results in the UI. - -# Prerequisites - -Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough [CPU and memory]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources) to run all of the components of Istio. - -# Setup Guide - -Refer to the [setup guide]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup) for instructions on how to set up Istio and use it in a project. - -# Disabling Istio - -To remove Istio components from a cluster, namespace, or workload, refer to the section on [disabling Istio.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/disabling-istio) - -# Accessing Visualizations - -> By default, only cluster owners have access to Jaeger and Kiali. For instructions on how to allow project members to access them, refer to [Access to Visualizations.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/rbac/#access-to-visualizations) - -After Istio is set up in a cluster, Grafana, Prometheus, Jaeger, and Kiali are available in the Rancher UI. - -Your access to the visualizations depend on your role. Grafana and Prometheus are only available for cluster owners. The Kiali and Jaeger UIs are available only to cluster owners by default, but cluster owners can allow project members to access them by editing the Istio settings. When you go to your project and click **Resources > Istio,** you can go to each UI for Kiali, Jaeger, Grafana, and Prometheus by clicking their icons in the top right corner of the page. - -To see the visualizations, go to the cluster where Istio is set up and click **Tools > Istio.** You should see links to each UI at the top of the page. - -You can also get to the visualization tools from the project view. - -# Viewing the Kiali Traffic Graph - -1. 
From the project view in Rancher, click **Resources > Istio.** -1. If you are a cluster owner, you can go to the **Traffic Graph** tab. This tab has the Kiali network visualization integrated into the UI. - -# Viewing Traffic Metrics - -Istio’s monitoring features provide visibility into the performance of all your services. - -1. From the project view in Rancher, click **Resources > Istio.** -1. Go to the **Traffic Metrics** tab. After traffic is generated in your cluster, you should be able to see metrics for **Success Rate, Request Volume, 4xx Response Count, Project 5xx Response Count** and **Request Duration.** Cluster owners can see all of the metrics, while project members can see a subset of the metrics. - -# Architecture - -Istio installs a service mesh that uses [Envoy](https://www.envoyproxy.io/learn/service-mesh) sidecar proxies to intercept traffic to each workload. These sidecars intercept and manage service-to-service communication, allowing fine-grained observation and control over traffic within the cluster. - -Only workloads that have the Istio sidecar injected can be tracked and controlled by Istio. - -Enabling Istio in Rancher enables monitoring in the cluster, and enables Istio in all new namespaces that are created in a cluster. You need to manually enable Istio in preexisting namespaces. - -When a namespace has Istio enabled, new workloads deployed in the namespace will automatically have the Istio sidecar. You need to manually enable Istio in preexisting workloads. - -For more information on the Istio sidecar, refer to the [Istio docs](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/). - -### Two Ingresses - -By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. To allow Istio to receive external traffic, you need to enable the Istio ingress gateway for the cluster. The result is that your cluster will have two ingresses. 
- -![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) \ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/legacy/_index.md b/content/rancher/v2.5/en/istio/legacy/_index.md deleted file mode 100644 index 9240f82fa7f..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Legacy UI Docs -weight: 2 ---- - -> This section is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/legacy/disabling-istio/_index.md b/content/rancher/v2.5/en/istio/legacy/disabling-istio/_index.md deleted file mode 100644 index d2035689626..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/disabling-istio/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Disabling Istio -weight: 4 ---- - -This section describes how to disable Istio in a cluster, namespace, or workload. - -# Disable Istio in a Cluster - -To disable Istio, - -1. From the **Global** view, navigate to the cluster that you want to disable Istio for. -1. Click **Tools > Istio.** -1. Click **Disable,** then click the red button again to confirm the disable action. - -**Result:** The `cluster-istio` application in the cluster's `system` project gets removed. The Istio sidecar cannot be deployed on any workloads in the cluster. - -# Disable Istio in a Namespace - -1. In the Rancher UI, go to the project that has the namespace where you want to disable Istio. -1. On the **Workloads** tab, you will see a list of namespaces and the workloads deployed in them. Go to the namespace where you want to disable and click the **⋮ > Disable Istio Auto Injection.** - -**Result:** When workloads are deployed in this namespace, they will not have the Istio sidecar. - -# Remove the Istio Sidecar from a Workload - -Disable Istio in the namespace, then redeploy the workloads with in it. 
They will be deployed without the Istio sidecar. \ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/legacy/rbac/_index.md b/content/rancher/v2.5/en/istio/legacy/rbac/_index.md deleted file mode 100644 index eb6f3c20fa7..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/rbac/_index.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: Role-based Access Control -weight: 3 ---- - -This section describes the permissions required to access Istio features and how to configure access to the Kiali and Jaeger visualizations. - -# Cluster-level Access - -By default, only cluster administrators can: - -- Enable Istio for the cluster -- Configure resource allocations for Istio -- View each UI for Prometheus, Grafana, Kiali, and Jaeger - -# Project-level Access - -After Istio is enabled in a cluster, project owners and members have permission to: - -- Enable and disable Istio sidecar auto-injection for namespaces -- Add the Istio sidecar to workloads -- View the traffic metrics and traffic graph for the cluster -- View the Kiali and Jaeger visualizations if cluster administrators give access to project members -- Configure Istio's resources (such as the gateway, destination rules, or virtual services) with `kubectl` (This does not apply to read-only project members) - -# Access to Visualizations - -By default, the Kiali and Jaeger visualizations are restricted to the cluster owner because the information in them could be sensitive. - -**Jaeger** provides a UI for a distributed tracing system, which is useful for root cause analysis and for determining what causes poor performance. - -**Kiali** provides a diagram that shows the services within a service mesh and how they are connected. - -Rancher supports giving groups permission to access Kiali and Jaeger, but not individuals. - -To configure who has permission to access the Kiali and Jaeger UI, - -1. Go to the cluster view and click **Tools > Istio.** -1. Then go to the **Member Access** section. 
If you want to restrict access to certain groups, choose **Allow cluster owner and specified members to access Kiali and Jaeger UI.** Search for the groups that you want to have access to Kiali and Jaeger. If you want all members to have access to the tools, click **Allow all members to access Kiali and Jaeger UI.** -1. Click **Save.** - -**Result:** The access levels for Kiali and Jaeger have been updated. - -# Summary of Default Permissions for Istio Users - -| Permission | Cluster Administrators | Project Owners | Project Members | Read-only Project Members | -|------------------------------------------|----------------|----------------|-----------------|---------------------------| -| Enable and disable Istio for the cluster | ✓ | | | | -| Configure Istio resource limits | ✓ | | | | -| Control who has access to Kiali and the Jaeger UI | ✓ | | | | -| Enable and disable Istio for a namespace | ✓ | ✓ | ✓ | | -| Enable and disable Istio on workloads | ✓ | ✓ | ✓ | | -| Configure Istio with `kubectl` | ✓ | ✓ | ✓ | | -| View Prometheus UI and Grafana UI | ✓ | | | | -| View Kiali UI and Jaeger UI ([Configurable](#access-to-visualizations)) | ✓ | | | | -| View Istio project dashboard, including traffic metrics* | ✓ | ✓ | ✓ | ✓ | - -* By default, only the cluster owner will see the traffic graph. Project members will see only a subset of traffic metrics. Project members cannot see the traffic graph because it comes from Kiali, and access to Kiali is restricted to cluster owners by default. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/legacy/release-notes/_index.md b/content/rancher/v2.5/en/istio/legacy/release-notes/_index.md deleted file mode 100644 index 79cadd9be69..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/release-notes/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Release Notes -weight: 5 ---- - - -# Istio 1.5.8 - -### Important note on 1.5.x versions - -When upgrading from any 1.4 version of Istio to any 1.5 version, the Rancher installer will delete several resources in order to complete the upgrade, at which point they will be immediately re-installed. This includes the `istio-reader-service-account`. If your Istio installation is using this service account be aware that any secrets tied to the service account will be deleted. Most notably this will **break specific [multi-cluster deployments](https://archive.istio.io/v1.4/docs/setup/install/multicluster/)**. Downgrades back to 1.4 are not possible. - -See the official upgrade notes for additional information on the 1.5 release and upgrading from 1.4: https://istio.io/latest/news/releases/1.5.x/announcing-1.5/upgrade-notes/ - -> **Note:** Rancher continues to use the Helm installation method, which produces a different architecture from an istioctl installation. - -### Known Issues - -* The Kiali traffic graph is currently not working [#24924](https://github.com/istio/istio/issues/24924) diff --git a/content/rancher/v2.5/en/istio/legacy/resources/_index.md b/content/rancher/v2.5/en/istio/legacy/resources/_index.md deleted file mode 100644 index 6d9c4e70d24..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/resources/_index.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: CPU and Memory Allocations -weight: 1 ---- - -This section describes the minimum recommended computing resources for the Istio components in a cluster. 
- -The CPU and memory allocations for each component are [configurable.](#configuring-resource-allocations) - -Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough CPU and memory to run all of the components of Istio. - -> **Tip:** In larger deployments, it is strongly advised that the infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component. - -The table below shows a summary of the minimum recommended resource requests and limits for the CPU and memory of each central Istio component. - -In Kubernetes, the resource request indicates that the workload will not deployed on a node unless the node has at least the specified amount of memory and CPU available. If the workload surpasses the limit for CPU or memory, it can be terminated or evicted from the node. For more information on managing resource limits for containers, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) - -Workload | Container | CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable ----------|-----------|---------------|---------------|-------------|-------------|------------- -istio-pilot |discovery| 500m | 2048Mi | 1000m | 4096Mi | Y - istio-telemetry |mixer| 1000m | 1024Mi | 4800m | 4096Mi | Y - istio-policy | mixer | 1000m | 1024Mi | 4800m | 4096Mi | Y - istio-tracing | jaeger | 100m | 100Mi | 500m | 1024Mi | Y - prometheus | prometheus | 750m | 750Mi | 1000m | 1024Mi | Y - grafana | grafana | 100m | 100Mi | 200m | 512Mi | Y - Others | - | 500m | 500Mi | - | - | N - **Total** | **-** | **3950m** | **5546Mi** | **>12300m** | **>14848Mi** | **-** - - -# Configuring Resource Allocations - -You can individually configure the resource allocation for each type of Istio component. This section includes the default resource allocations for each component. 
- -To make it easier to schedule the workloads to a node, a cluster administrator can reduce the CPU and memory resource requests for the component. However, the default CPU and memory allocations are the minimum that we recommend. - -You can find more information about Istio configuration in the [official Istio documentation](https://istio.io/docs/concepts/what-is-istio). - -To configure the resources allocated to an Istio component, - -1. In Rancher, go to the cluster where you have Istio installed. -1. Click **Tools > Istio.** This opens the Istio configuration page. -1. Change the CPU or memory allocations, the nodes where each component will be scheduled to, or the node tolerations. -1. Click **Save.** - -**Result:** The resource allocations for the Istio components are updated. - -## Pilot - -[Pilot](https://istio.io/docs/ops/deployment/architecture/#pilot) provides the following: - -- Authentication configuration -- Service discovery for the Envoy sidecars -- Traffic management capabilities for intelligent routing (A/B tests and canary rollouts) -- Configuration for resiliency (timeouts, retries, circuit breakers, etc) - -For more information on Pilot, refer to the [documentation](https://istio.io/docs/concepts/traffic-management/#pilot-and-envoy). - -Option | Description| Required | Default --------|------------|-------|------- -Pilot CPU Limit | CPU resource limit for the istio-pilot pod.| Yes | 1000 -Pilot CPU Reservation | CPU reservation for the istio-pilot pod. | Yes | 500 -Pilot Memory Limit | Memory resource limit for the istio-pilot pod. | Yes | 4096 -Pilot Memory Reservation | Memory resource requests for the istio-pilot pod. | Yes | 2048 -Trace sampling Percentage | [Trace sampling percentage](https://istio.io/docs/tasks/telemetry/distributed-tracing/overview/#trace-sampling) | Yes | 1 -Pilot Selector | Ability to select the nodes in which istio-pilot pod is deployed to. To use this option, the nodes must have labels. 
| No | n/a - -## Mixer - -[Mixer](https://istio.io/docs/ops/deployment/architecture/#mixer) enforces access control and usage policies across the service mesh. It also integrates with plugins for monitoring tools such as Prometheus. The Envoy sidecar proxy passes telemetry data and monitoring data to Mixer, and Mixer passes the monitoring data to Prometheus. - -For more information on Mixer, policies and telemetry, refer to the [documentation](https://istio.io/docs/concepts/policies-and-telemetry/). - -Option | Description| Required | Default --------|------------|-------|------- -Mixer Telemetry CPU Limit | CPU resource limit for the istio-telemetry pod.| Yes | 4800 -Mixer Telemetry CPU Reservation | CPU reservation for the istio-telemetry pod.| Yes | 1000 -Mixer Telemetry Memory Limit | Memory resource limit for the istio-telemetry pod.| Yes | 4096 -Mixer Telemetry Memory Reservation | Memory resource requests for the istio-telemetry pod.| Yes | 1024 -Enable Mixer Policy | Whether or not to deploy the istio-policy. | Yes | False -Mixer Policy CPU Limit | CPU resource limit for the istio-policy pod. | Yes, when policy enabled | 4800 -Mixer Policy CPU Reservation | CPU reservation for the istio-policy pod. | Yes, when policy enabled | 1000 -Mixer Policy Memory Limit | Memory resource limit for the istio-policy pod. | Yes, when policy enabled | 4096 -Mixer Policy Memory Reservation | Memory resource requests for the istio-policy pod. | Yes, when policy enabled | 1024 -Mixer Selector | Ability to select the nodes in which istio-policy and istio-telemetry pods are deployed to. To use this option, the nodes must have labels. | No | n/a - -## Tracing - -[Distributed tracing](https://istio.io/docs/tasks/telemetry/distributed-tracing/overview/) enables users to track a request through a service mesh. This makes it easier to troubleshoot problems with latency, parallelism and serialization. 
- -Option | Description| Required | Default --------|------------|-------|------- -Enable Tracing | Whether or not to deploy the istio-tracing. | Yes | True -Tracing CPU Limit | CPU resource limit for the istio-tracing pod. | Yes | 500 -Tracing CPU Reservation | CPU reservation for the istio-tracing pod. | Yes | 100 -Tracing Memory Limit | Memory resource limit for the istio-tracing pod. | Yes | 1024 -Tracing Memory Reservation | Memory resource requests for the istio-tracing pod. | Yes | 100 -Tracing Selector | Ability to select the nodes in which tracing pod is deployed to. To use this option, the nodes must have labels. | No | n/a - -## Ingress Gateway - -The Istio gateway allows Istio features such as monitoring and route rules to be applied to traffic entering the cluster. This gateway is a prerequisite for outside traffic to make requests to Istio. - -For more information, refer to the [documentation](https://istio.io/docs/tasks/traffic-management/ingress/). - -Option | Description| Required | Default --------|------------|-------|------- -Enable Ingress Gateway | Whether or not to deploy the istio-ingressgateway. | Yes | False -Service Type of Istio Ingress Gateway | How to expose the gateway. You can choose NodePort or Loadbalancer | Yes | NodePort -Http2 Port | The NodePort for http2 requests | Yes | 31380 -Https Port | The NodePort for https requests | Yes | 31390 -Load Balancer IP | Ingress Gateway Load Balancer IP | No | n/a -Load Balancer Source Ranges | Ingress Gateway Load Balancer Source Ranges | No | n/a -Ingress Gateway CPU Limit | CPU resource limit for the istio-ingressgateway pod. | Yes | 2000 -Ingress Gateway CPU Reservation | CPU reservation for the istio-ingressgateway pod. | Yes | 100 -Ingress Gateway Memory Limit | Memory resource limit for the istio-ingressgateway pod. | Yes | 1024 -Ingress Gateway Memory Reservation | Memory resource requests for the istio-ingressgateway pod. 
| Yes | 128 -Ingress Gateway Selector | Ability to select the nodes in which istio-ingressgateway pod is deployed to. To use this option, the nodes must have labels. | No | n/a - -## Prometheus - -You can query for Istio metrics using Prometheus. Prometheus is an open-source systems monitoring and alerting toolkit. - -Option | Description| Required | Default --------|------------|-------|------- -Prometheus CPU Limit | CPU resource limit for the Prometheus pod.| Yes | 1000 -Prometheus CPU Reservation | CPU reservation for the Prometheus pod.| Yes | 750 -Prometheus Memory Limit | Memory resource limit for the Prometheus pod.| Yes | 1024 -Prometheus Memory Reservation | Memory resource requests for the Prometheus pod.| Yes | 750 -Retention for Prometheus | How long your Prometheus instance retains data | Yes | 6 -Prometheus Selector | Ability to select the nodes in which Prometheus pod is deployed to. To use this option, the nodes must have labels.| No | n/a - -## Grafana - -You can visualize metrics with Grafana. Grafana lets you visualize Istio traffic data scraped by Prometheus. - -Option | Description| Required | Default --------|------------|-------|------- -Enable Grafana | Whether or not to deploy the Grafana.| Yes | True -Grafana CPU Limit | CPU resource limit for the Grafana pod.| Yes, when Grafana enabled | 200 -Grafana CPU Reservation | CPU reservation for the Grafana pod.| Yes, when Grafana enabled | 100 -Grafana Memory Limit | Memory resource limit for the Grafana pod.| Yes, when Grafana enabled | 512 -Grafana Memory Reservation | Memory resource requests for the Grafana pod.| Yes, when Grafana enabled | 100 -Grafana Selector | Ability to select the nodes in which Grafana pod is deployed to. To use this option, the nodes must have labels. 
| No | n/a -Enable Persistent Storage for Grafana | Enable Persistent Storage for Grafana | Yes, when Grafana enabled | False -Source | Use a Storage Class to provision a new persistent volume or Use an existing persistent volume claim | Yes, when Grafana enabled and enabled PV | Use SC -Storage Class | Storage Class for provisioning PV for Grafana | Yes, when Grafana enabled, enabled PV and use storage class | Use the default class -Persistent Volume Size | The size for the PV you would like to provision for Grafana | Yes, when Grafana enabled, enabled PV and use storage class | 5Gi -Existing Claim | Use existing PVC for Grafana | Yes, when Grafana enabled, enabled PV and use existing PVC | n/a diff --git a/content/rancher/v2.5/en/istio/legacy/setup/_index.md b/content/rancher/v2.5/en/istio/legacy/setup/_index.md deleted file mode 100644 index da1fbcacc7a..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/setup/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Setup Guide -weight: 2 ---- - -This section describes how to enable Istio and start using it in your projects. - -This section assumes that you have Rancher installed, and you have a Rancher-provisioned Kubernetes cluster where you would like to set up Istio. - -If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. - -> **Quick Setup** If you don't need external traffic to reach Istio, and you just want to set up Istio for monitoring and tracing traffic within the cluster, skip the steps for [setting up the Istio gateway]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) and [setting up Istio's components for traffic management.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) - -1. [Enable Istio in the cluster.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster) -1. 
[Enable Istio in all the namespaces where you want to use it.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) -1. [Select the nodes where the main Istio components will be deployed.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors) -1. [Add deployments and services that have the Istio sidecar injected.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads) -1. [Set up the Istio gateway. ]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) -1. [Set up Istio's components for traffic management.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) -1. [Generate traffic and see Istio in action.](#generate-traffic-and-see-istio-in-action) - -# Prerequisites - -This guide assumes you have already [installed Rancher,]({{}}/rancher/v2.x/en/installation) and you have already [provisioned a separate Kubernetes cluster]({{}}/rancher/v2.x/en/cluster-provisioning) on which you will install Istio. - -The nodes in your cluster must meet the [CPU and memory requirements.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources/) - -The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/) diff --git a/content/rancher/v2.5/en/istio/legacy/setup/deploy-workloads/_index.md b/content/rancher/v2.5/en/istio/legacy/setup/deploy-workloads/_index.md deleted file mode 100644 index 8e52d678bbf..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/setup/deploy-workloads/_index.md +++ /dev/null @@ -1,322 +0,0 @@ ---- -title: 4. Add Deployments and Services with the Istio Sidecar -weight: 4 ---- - -> **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have Istio enabled. - -Enabling Istio in a namespace only enables automatic sidecar injection for new workloads. 
To enable the Envoy sidecar for existing workloads, you need to enable it manually for each workload. - -To inject the Istio sidecar on an existing workload in the namespace, go to the workload, click the **⋮,** and click **Redeploy.** When the workload is redeployed, it will have the Envoy sidecar automatically injected. - -Wait a few minutes for the workload to upgrade to have the istio sidecar. Click it and go to the Containers section. You should be able to see istio-init and istio-proxy alongside your original workload. This means the Istio sidecar is enabled for the workload. Istio is doing all the wiring for the sidecar envoy. Now Istio can do all the features automatically if you enable them in the yaml. - -### 3. Add Deployments and Services - -Next we add the Kubernetes resources for the sample deployments and services for the BookInfo app in Istio's documentation. - -1. Go to the project inside the cluster you want to deploy the workload on. -1. In Workloads, click **Import YAML.** -1. Copy the below resources into the form. -1. Click **Import.** - -This will set up the following sample resources from Istio's example BookInfo app: - -Details service and deployment: - -- A `details` Service -- A ServiceAccount for `bookinfo-details` -- A `details-v1` Deployment - -Ratings service and deployment: - -- A `ratings` Service -- A ServiceAccount for `bookinfo-ratings` -- A `ratings-v1` Deployment - -Reviews service and deployments (three versions): - -- A `reviews` Service -- A ServiceAccount for `bookinfo-reviews` -- A `reviews-v1` Deployment -- A `reviews-v2` Deployment -- A `reviews-v3` Deployment - -Productpage service and deployment: - -This is the main page of the app, which will be visible from a web browser. The other services will be called from this page. 
- -- A `productpage` service -- A ServiceAccount for `bookinfo-productpage` -- A `productpage-v1` Deployment - -### Resource YAML - -```yaml -# Copyright 2017 Istio Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -################################################################################################## -# Details service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: details - labels: - app: details - service: details -spec: - ports: - - port: 9080 - name: http - selector: - app: details ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-details ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: details-v1 - labels: - app: details - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: details - version: v1 - template: - metadata: - labels: - app: details - version: v1 - spec: - serviceAccountName: bookinfo-details - containers: - - name: details - image: docker.io/istio/examples-bookinfo-details-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Ratings service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: ratings - labels: - app: ratings - service: ratings -spec: - ports: 
- - port: 9080 - name: http - selector: - app: ratings ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-ratings ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ratings-v1 - labels: - app: ratings - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: ratings - version: v1 - template: - metadata: - labels: - app: ratings - version: v1 - spec: - serviceAccountName: bookinfo-ratings - containers: - - name: ratings - image: docker.io/istio/examples-bookinfo-ratings-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Reviews service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: reviews - labels: - app: reviews - service: reviews -spec: - ports: - - port: 9080 - name: http - selector: - app: reviews ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-reviews ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v1 - labels: - app: reviews - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v1 - template: - metadata: - labels: - app: reviews - version: v1 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v2 - labels: - app: reviews - version: v2 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v2 - template: - metadata: - labels: - app: reviews - version: v2 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v2:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- 
-apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v3 - labels: - app: reviews - version: v3 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v3 - template: - metadata: - labels: - app: reviews - version: v3 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v3:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Productpage services -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: productpage - labels: - app: productpage - service: productpage -spec: - ports: - - port: 9080 - name: http - selector: - app: productpage ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-productpage ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: productpage-v1 - labels: - app: productpage - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: productpage - version: v1 - template: - metadata: - labels: - app: productpage - version: v1 - spec: - serviceAccountName: bookinfo-productpage - containers: - - name: productpage - image: docker.io/istio/examples-bookinfo-productpage-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -``` - -### [Next: Set up the Istio Gateway]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) diff --git a/content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-cluster/_index.md b/content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-cluster/_index.md deleted file mode 100644 index a98090a28ca..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-cluster/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: 1. 
Enable Istio in the Cluster -weight: 1 ---- - -This cluster uses the default Nginx controller to allow traffic into the cluster. - -A Rancher [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can configure Rancher to deploy Istio in a Kubernetes cluster. - -> If the cluster has a Pod Security Policy enabled there are [prerequisites steps]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/) - -1. From the **Global** view, navigate to the **cluster** where you want to enable Istio. -1. Click **Tools > Istio.** -1. Optional: Configure member access and [resource limits]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources/) for the Istio components. Ensure you have enough resources on your worker nodes to enable Istio. -1. Click **Enable**. -1. Click **Save**. - -**Result:** Istio is enabled at the cluster level. - -The Istio application, `cluster-istio`, is added as an application to the cluster's `system` project. - -When Istio is enabled in the cluster, the label for Istio sidecar auto injection,`istio-injection=enabled`, will be automatically added to each new namespace in this cluster. This automatically enables Istio sidecar injection in all new workloads that are deployed in those namespaces. You will need to manually enable Istio in preexisting namespaces and workloads. 
- -### [Next: Enable Istio in a Namespace]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) diff --git a/content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md b/content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md deleted file mode 100644 index f31369cfc61..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Enable Istio with Pod Security Policies ---- - - >**Note:** The following guide is only for RKE provisioned clusters. - -If you have restrictive Pod Security Policies enabled, then Istio may not be able to function correctly, because it needs certain permissions in order to install itself and manage pod infrastructure. In this section, we will configure a cluster with PSPs enabled for an Istio install, and also set up the Istio CNI plugin. - -The Istio CNI plugin removes the need for each application pod to have a privileged `NET_ADMIN` container. For further information, see the [Istio CNI Plugin docs](https://istio.io/docs/setup/additional-setup/cni). Please note that the [Istio CNI Plugin is in alpha](https://istio.io/about/feature-stages/). - -- 1. [Configure the System Project Policy to allow Istio install.](#2-configure-the-system-project-policy-to-allow-istio-install) -- 2. [Install the CNI plugin in the System project.](#3-install-the-cni-plugin-in-the-system-project) -- 3. [Install Istio.](#4-install-istio) - -### 1. Configure the System Project Policy to allow Istio install - -1. From the main menu of the **Dashboard**, select **Projects/Namespaces**. -1. Find the **Project: System** project and select the **⋮ > Edit**. -1. Change the Pod Security Policy option to be unrestricted, then click Save. - - -### 2. Install the CNI Plugin in the System Project - -1. 
From the main menu of the **Dashboard**, select **Projects/Namespaces**. -1. Select the **Project: System** project. -1. Choose **Tools > Catalogs** in the navigation bar. -1. Add a catalog with the following: - 1. Name: istio-cni - 1. Catalog URL: https://github.com/istio/cni - 1. Branch: The branch that matches your current release, for example: `release-1.4`. -1. From the main menu select **Apps** -1. Click Launch and select istio-cni -1. Update the namespace to be "kube-system" -1. In the answers section, click "Edit as YAML" and paste in the following, then click launch: - -``` ---- - logLevel: "info" - excludeNamespaces: - - "istio-system" - - "kube-system" -``` - -### 3. Install Istio - -Follow the [primary instructions]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/), adding a custom answer: `istio_cni.enabled: true`. - -After Istio has finished installing, the Apps page in System Projects should show both istio and `istio-cni` applications deployed successfully. Sidecar injection will now be functional. diff --git a/content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-namespace/_index.md b/content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-namespace/_index.md deleted file mode 100644 index 2f1f6d74786..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/setup/enable-istio-in-namespace/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: 2. Enable Istio in a Namespace -weight: 2 ---- - -You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. - -This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. - -> **Prerequisite:** To enable Istio in a namespace, the cluster must have Istio enabled. - -1. 
In the Rancher UI, go to the cluster view. Click the **Projects/Namespaces** tab. -1. Go to the namespace where you want to enable the Istio sidecar auto injection and click the **⋮.** -1. Click **Edit.** -1. In the **Istio sidecar auto injection** section, click **Enable.** -1. Click **Save.** - -**Result:** The namespace now has the label `istio-injection=enabled`. All new workloads deployed in this namespace will have the Istio sidecar injected by default. - -### Verifying that Automatic Istio Sidecar Injection is Enabled - -To verify that Istio is enabled, deploy a hello-world workload in the namespace. Go to the workload and click the pod name. In the **Containers** section, you should see the `istio-proxy` container. - -### Excluding Workloads from Being Injected with the Istio Sidecar - -If you need to exclude a workload from getting injected with the Istio sidecar, use the following annotation on the workload: - -``` -sidecar.istio.io/inject: “false” -``` - -To add the annotation to a workload, - -1. From the **Global** view, open the project that has the workload that should not have the sidecar. -1. Click **Resources > Workloads.** -1. Go to the workload that should not have the sidecar and click **⋮ > Edit.** -1. Click **Show Advanced Options.** Then expand the **Labels & Annotations** section. -1. Click **Add Annotation.** -1. In the **Key** field, enter `sidecar.istio.io/inject`. -1. In the **Value** field, enter `false`. -1. Click **Save.** - -**Result:** The Istio sidecar will not be injected into the workload. - -> **NOTE:** If you are having issues with a Job you deployed not completing, you will need to add this annotation to your pod using the provided steps. Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. 
- - -### [Next: Select the Nodes ]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors) \ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/legacy/setup/gateway/_index.md b/content/rancher/v2.5/en/istio/legacy/setup/gateway/_index.md deleted file mode 100644 index 47c9ff33812..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/setup/gateway/_index.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: 5. Set up the Istio Gateway -weight: 5 ---- - -The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. - -You can use the NGINX ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. - -To allow Istio to receive external traffic, you need to enable Istio's gateway, which works as a north-south proxy for external traffic. When you enable the Istio gateway, the result is that your cluster will have two ingresses. - -You will also need to set up a Kubernetes gateway for your services. This Kubernetes resource points to Istio's implementation of the ingress gateway to the cluster. - -You can route traffic into the service mesh with a load balancer or just Istio's NodePort gateway. This section describes how to set up the NodePort gateway. - -For more information on the Istio gateway, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/gateway/) - -![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) - -# Enable the Istio Gateway - -The ingress gateway is a Kubernetes service that will be deployed in your cluster. There is only one Istio gateway per cluster. - -1. 
Go to the cluster where you want to allow outside traffic into Istio. -1. Click **Tools > Istio.** -1. Expand the **Ingress Gateway** section. -1. Under **Enable Ingress Gateway,** click **True.** The default type of service for the Istio gateway is NodePort. You can also configure it as a [load balancer.]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/) -1. Optionally, configure the ports, service types, node selectors and tolerations, and resource requests and limits for this service. The default resource requests for CPU and memory are the minimum recommended resources. -1. Click **Save.** - -**Result:** The gateway is deployed, which allows Istio to receive traffic from outside the cluster. - -# Add a Kubernetes Gateway that Points to the Istio Gateway - -To allow traffic to reach Ingress, you will also need to provide a Kubernetes gateway resource in your YAML that points to Istio's implementation of the ingress gateway to the cluster. - -1. Go to the namespace where you want to deploy the Kubernetes gateway and click **Import YAML.** -1. Upload the gateway YAML as a file or paste it into the form. An example gateway YAML is provided below. -1. Click **Import.** - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: bookinfo-gateway -spec: - selector: - istio: ingressgateway # use istio default controller - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "*" ---- -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: bookinfo -spec: - hosts: - - "*" - gateways: - - bookinfo-gateway - http: - - match: - - uri: - exact: /productpage - - uri: - prefix: /static - - uri: - exact: /login - - uri: - exact: /logout - - uri: - prefix: /api/v1/products - route: - - destination: - host: productpage - port: - number: 9080 -``` - -**Result:** You have configured your gateway resource so that Istio can receive traffic from outside the cluster. 
- -Confirm that the resource exists by running: -``` -kubectl get gateway -A -``` - -The result should be something like this: -``` -NAME AGE -bookinfo-gateway 64m -``` - -### Access the ProductPage Service from a Web Browser - -To test and see if the BookInfo app deployed correctly, the app can be viewed a web browser using the Istio controller IP and port, combined with the request name specified in your Kubernetes gateway resource: - -`http://:/productpage` - -To get the ingress gateway URL and port, - -1. Go to the `System` project in your cluster. -1. Within the `System` project, go to `Resources` > `Workloads` then scroll down to the `istio-system` namespace. -1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Under the name of this workload, you should see links, such as `80/tcp`. -1. Click one of those links. This should show you the URL of the ingress gateway in your web browser. Append `/productpage` to the URL. - -**Result:** You should see the BookInfo app in the web browser. - -For help inspecting the Istio controller URL and ports, try the commands the [Istio documentation.](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports) - -# Troubleshooting - -The [official Istio documentation](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#troubleshooting) suggests `kubectl` commands to inspect the correct ingress host and ingress port for external requests. - -### Confirming that the Kubernetes Gateway Matches Istio's Ingress Controller - -You can try the steps in this section to make sure the Kubernetes gateway is configured properly. - -In the gateway resource, the selector refers to Istio's default ingress controller by its label, in which the key of the label is `istio` and the value is `ingressgateway`. To make sure the label is appropriate for the gateway, do the following: - -1. Go to the `System` project in your cluster. -1. 
Within the `System` project, go to the namespace `istio-system`. -1. Within `istio-system`, there is a workload named `istio-ingressgateway`. -1. Click the name of this workload and go to the **Labels and Annotations** section. You should see that it has the key `istio` and the value `ingressgateway`. This confirms that the selector in the Gateway resource matches Istio's default ingress controller. - -### [Next: Set up Istio's Components for Traffic Management]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) diff --git a/content/rancher/v2.5/en/istio/legacy/setup/node-selectors/_index.md b/content/rancher/v2.5/en/istio/legacy/setup/node-selectors/_index.md deleted file mode 100644 index 994656361e3..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/setup/node-selectors/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: 3. Select the Nodes Where Istio Components Will be Deployed -weight: 3 ---- - -> **Prerequisite:** Your cluster needs a worker node that can designated for Istio. The worker node should meet the [resource requirements.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources) - -This section describes how use node selectors to configure Istio components to be deployed on a designated node. - -In larger deployments, it is strongly advised that Istio's infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component. - -# Adding a Label to the Istio Node - -First, add a label to the node where Istio components should be deployed. This label can have any key-value pair. For this example, we will use the key `istio` and the value `enabled`. - -1. From the cluster view, go to the **Nodes** tab. -1. Go to a worker node that will host the Istio components and click **⋮ > Edit.** -1. Expand the **Labels & Annotations** section. -1. Click **Add Label.** -1. In the fields that appear, enter `istio` for the key and `enabled` for the value. -1. 
Click **Save.** - -**Result:** A worker node has the label that will allow you to designate it for Istio components. - -# Configuring Istio Components to Use the Labeled Node - -Configure each Istio component to be deployed to the node with the Istio label. Each Istio component can be configured individually, but in this tutorial, we will configure all of the components to be scheduled on the same node for the sake of simplicity. - -For larger deployments, it is recommended to schedule each component of Istio onto separate nodes. - -1. From the cluster view, click **Tools > Istio.** -1. Expand the **Pilot** section and click **Add Selector** in the form that appears. Enter the node selector label that you added to the Istio node. In our case, we are using the key `istio` and the value `enabled.` -1. Repeat the previous step for the **Mixer** and **Tracing** sections. -1. Click **Save.** - -**Result:** The Istio components will be deployed on the Istio node. - -### [Next: Add Deployments and Services]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads) \ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/legacy/setup/set-up-traffic-management/_index.md b/content/rancher/v2.5/en/istio/legacy/setup/set-up-traffic-management/_index.md deleted file mode 100644 index 2048e779265..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/setup/set-up-traffic-management/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: 6. Set up Istio's Components for Traffic Management -weight: 6 ---- - -A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. 
- -- [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. -- [Destination rules](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule/) serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. - -This section describes how to add an example virtual service that corresponds to the `reviews` microservice in the sample BookInfo app. The purpose of this service is to divide traffic between two versions of the `reviews` service. - -In this example, we take the traffic to the `reviews` service and intercept it so that 50 percent of it goes to `v1` of the service and 50 percent goes to `v2`. - -After this virtual service is deployed, we will generate traffic and see from the Kiali visualization that traffic is being routed evenly between the two versions of the service. - -To deploy the virtual service and destination rules for the `reviews` service, - -1. Go to the project view and click **Import YAML.** -1. Copy resources below into the form. -1. 
Click **Import.** - -``` -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: reviews -spec: - hosts: - - reviews - http: - - route: - - destination: - host: reviews - subset: v1 - weight: 50 - - destination: - host: reviews - subset: v3 - weight: 50 ---- -apiVersion: networking.istio.io/v1alpha3 -kind: DestinationRule -metadata: - name: reviews -spec: - host: reviews - subsets: - - name: v1 - labels: - version: v1 - - name: v2 - labels: - version: v2 - - name: v3 - labels: - version: v3 -``` -**Result:** When you generate traffic to this service (for example, by refreshing the ingress gateway URL), the Kiali traffic graph will reflect that traffic to the `reviews` service is divided evenly between `v1` and `v3`. - -### [Next: Generate and View Traffic]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/view-traffic) diff --git a/content/rancher/v2.5/en/istio/legacy/setup/view-traffic/_index.md b/content/rancher/v2.5/en/istio/legacy/setup/view-traffic/_index.md deleted file mode 100644 index bb6c979e28d..00000000000 --- a/content/rancher/v2.5/en/istio/legacy/setup/view-traffic/_index.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: 7. Generate and View Traffic -weight: 7 ---- - -This section describes how to view the traffic that is being managed by Istio. - -# The Kiali Traffic Graph - -Rancher integrates a Kiali graph into the Rancher UI. The Kiali graph provides a powerful way to visualize the topology of your Istio service mesh. It shows you which services communicate with each other. - -To see the traffic graph, - -1. From the project view in Rancher, click **Resources > Istio.** -1. Go to the **Traffic Graph** tab. This tab has the Kiali network visualization integrated into the UI. - -If you refresh the URL to the BookInfo app several times, you should be able to see green arrows on the Kiali graph showing traffic to `v1` and `v3` of the `reviews` service. 
The control panel on the right side of the graph lets you configure details including how many minutes of the most recent traffic should be shown on the graph. - -For additional tools and visualizations, you can go to each UI for Kiali, Jaeger, Grafana, and Prometheus by clicking their icons in the top right corner of the page. - -# Viewing Traffic Metrics - -Istio’s monitoring features provide visibility into the performance of all your services. - -1. From the project view in Rancher, click **Resources > Istio.** -1. Go to the **Traffic Metrics** tab. After traffic is generated in your cluster, you should be able to see metrics for **Success Rate, Request Volume, 4xx Response Count, Project 5xx Response Count** and **Request Duration.** \ No newline at end of file diff --git a/content/rancher/v2.5/en/logging/legacy/_index.md b/content/rancher/v2.5/en/logging/legacy/_index.md deleted file mode 100644 index 5f8404cf23d..00000000000 --- a/content/rancher/v2.5/en/logging/legacy/_index.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Legacy UI Docs -weight: 2 ---- - -Logging is helpful because it allows you to: - -- Capture and analyze the state of your cluster -- Look for trends in your environment -- Save your logs to a safe location outside of your cluster -- Stay informed of events like a container crashing, a pod eviction, or a node dying -- More easily debug and troubleshoot problems - -Rancher supports integration with the following services: - -- Elasticsearch -- Splunk -- Kafka -- Syslog -- Fluentd - -This section covers the following topics: - -- [How logging integrations work](#how-logging-integrations-work) -- [Requirements](#requirements) -- [Logging scope](#logging-scope) -- [Enabling cluster logging](#enabling-cluster-logging) - -# How Logging Integrations Work - -Rancher can integrate with popular external services used for event streams, telemetry, or search. These services can log errors and warnings in your Kubernetes infrastructure to a stream. 
- -These services collect container log events, which are saved to the `/var/log/containers` directory on each of your nodes. The service collects both standard and error events. You can then log into your services to review the events collected, leveraging each service's unique features. - -When configuring Rancher to integrate with these services, you'll have to point Rancher toward the service's endpoint and provide authentication information. - -Additionally, you'll have the opportunity to enter key-value pairs to filter the log events collected. The service will only collect events for containers marked with your configured key-value pairs. - ->**Note:** You can only configure one logging service per cluster or per project. - -# Requirements - -The Docker daemon on each node in the cluster should be [configured](https://docs.docker.com/config/containers/logging/configure/) with the (default) log-driver: `json-file`. You can check the log-driver by running the following command: - -``` -$ docker info | grep 'Logging Driver' -Logging Driver: json-file -``` - -# Logging Scope - -You can configure logging at either cluster level or project level. - -- Cluster logging writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. -- [Project logging]({{}}/rancher/v2.x/en/project-admin/tools/logging/) writes logs for every pod in that particular project. - -Logs that are sent to your logging service are from the following locations: - - - Pod logs stored at `/var/log/containers`. - - Kubernetes system components logs stored at `/var/lib/rancher/rke/log/`. 
- -# Enabling Cluster Logging - -As an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send Kubernetes logs to a logging service. - -1. From the **Global** view, navigate to the cluster that you want to configure cluster logging. - -1. Select **Tools > Logging** in the navigation bar. - -1. Select a logging service and enter the configuration. Refer to the specific service for detailed configuration. Rancher supports integration with the following services: - - - [Elasticsearch]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/elasticsearch/) - - [Splunk]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/splunk/) - - [Kafka]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/kafka/) - - [Syslog]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/syslog/) - - [Fluentd]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/fluentd/) - -1. (Optional) Instead of using the UI to configure the logging services, you can enter custom advanced configurations by clicking on **Edit as File**, which is located above the logging targets. This link is only visible after you select a logging service. - - - With the file editor, enter raw fluentd configuration for any logging service. Refer to the documentation for each logging service on how to setup the output configuration. - - - [Elasticsearch Documentation](https://github.com/uken/fluent-plugin-elasticsearch) - - [Splunk Documentation](https://github.com/fluent/fluent-plugin-splunk) - - [Kafka Documentation](https://github.com/fluent/fluent-plugin-kafka) - - [Syslog Documentation](https://github.com/dlackty/fluent-plugin-remote_syslog) - - [Fluentd Documentation](https://docs.fluentd.org/v1.0/articles/out_forward) - - - If the logging service is using TLS, you also need to complete the **SSL Configuration** form. - 1. Provide the **Client Private Key** and **Client Certificate**. 
You can either copy and paste them or upload them by using the **Read from a file** button. - - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 2. If you are using a self-signed certificate, provide the **CA Certificate PEM**. - -1. (Optional) Complete the **Additional Logging Configuration** form. - - 1. **Optional:** Use the **Add Field** button to add custom log fields to your logging configuration. These fields are key value pairs (such as `foo=bar`) that you can use to filter the logs from another system. - - 1. Enter a **Flush Interval**. This value determines how often [Fluentd](https://www.fluentd.org/) flushes data to the logging server. Intervals are measured in seconds. - - 1. **Include System Log**. The logs from pods in system project and RKE components will be sent to the target. Uncheck it to exclude the system logs. - -1. Click **Test**. Rancher sends a test log to the service. - - > **Note:** This button is replaced with _Dry Run_ if you are using the custom configuration editor. In this case, Rancher calls the fluentd dry run command to validate the configuration. - -1. Click **Save**. - -**Result:** Rancher is now configured to send logs to the selected service. Log into the logging service so that you can start viewing the logs. 
- -## Related Links - -[Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/) diff --git a/content/rancher/v2.5/en/logging/legacy/elasticsearch/_index.md b/content/rancher/v2.5/en/logging/legacy/elasticsearch/_index.md deleted file mode 100644 index 5f9a2a7b724..00000000000 --- a/content/rancher/v2.5/en/logging/legacy/elasticsearch/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Elasticsearch -weight: 200 ---- - -If your organization uses [Elasticsearch](https://www.elastic.co/), either on premise or in the cloud, you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Elasticsearch deployment to view logs. - ->**Prerequisites:** Configure an [Elasticsearch deployment](https://www.elastic.co/guide/en/cloud/saas-release/ec-create-deployment.html). - -## Elasticsearch Deployment Configuration - -1. In the **Endpoint** field, enter the IP address and port of your Elasticsearch instance. You can find this information from the dashboard of your Elasticsearch deployment. - - * Elasticsearch usually uses port `9200` for HTTP and `9243` for HTTPS. - -1. If you are using [X-Pack Security](https://www.elastic.co/guide/en/x-pack/current/xpack-introduction.html), enter your Elasticsearch **Username** and **Password** for authentication. - -1. Enter an [Index Pattern](https://www.elastic.co/guide/en/kibana/current/index-patterns.html). - -## SSL Configuration - -If your instance of Elasticsearch uses SSL, your **Endpoint** will need to begin with `https://`. With the correct endpoint, the **SSL Configuration** form is enabled and ready to be completed. - -1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. 
For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - -1. Enter your **Client Key Password**. - -1. Enter your **SSL Version**. The default version is `TLSv1_2`. - -1. Select whether or not you want to verify your SSL. - - * If you are using a self-signed certificate, select **Enabled - Input trusted server certificate**, provide the **CA Certificate PEM**. You can copy and paste the certificate or upload it using the **Read from a file** button. - * If you are using a certificate from a certificate authority, select **Enabled - Input trusted server certificate**. You do not need to provide a **CA Certificate PEM**. diff --git a/content/rancher/v2.5/en/logging/legacy/fluentd/_index.md b/content/rancher/v2.5/en/logging/legacy/fluentd/_index.md deleted file mode 100644 index 42f54794862..00000000000 --- a/content/rancher/v2.5/en/logging/legacy/fluentd/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Fluentd -weight: 600 ---- - -If your organization uses [Fluentd](https://www.fluentd.org/), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Fluentd server to view logs. - ->**Prerequisites:** Configure Fluentd input forward to receive the event stream. -> ->See [Fluentd Documentation](https://docs.fluentd.org/v1.0/articles/in_forward) for details. - -## Fluentd Configuration - -You can add multiple Fluentd Servers. If you want to add additional Fluentd servers, click **Add Fluentd Server**. For each Fluentd server, complete the configuration information: - -1. In the **Endpoint** field, enter the address and port of your Fluentd instance, e.g. `http://Fluentd-server:24224`. - -1. Enter the **Shared Key** if your Fluentd Server is using a shared key for authentication. - -1. Enter the **Username** and **Password** if your Fluentd Server is using username and password for authentication. - -1. 
**Optional:** Enter the **Hostname** of the Fluentd server. - -1. Enter the load balancing **Weight** of the Fluentd server. If the weight of one server is 20 and the other server is 30, events will be sent in a 2:3 ratio. If you do not enter a weight, the default weight is 60. - -1. If this server is a standby server, check **Use as Standby Only**. Standby servers are used when all other servers are not available. - -After adding all the Fluentd servers, you have the option to select **Enable Gzip Compression**. By default, this is enabled because the transferred payload size will be reduced. - -## SSL Configuration - -If your Fluentd servers are using TLS, you need to select **Use TLS**. If you are using a self-signed certificate, provide the **CA Certificate PEM**. You can copy and paste the certificate or upload it using the **Read from a file** button. - ->**Note:** Fluentd does not support self-signed certificates when client authentication is enabled. diff --git a/content/rancher/v2.5/en/logging/legacy/kafka/_index.md b/content/rancher/v2.5/en/logging/legacy/kafka/_index.md deleted file mode 100644 index 6c0b69bc508..00000000000 --- a/content/rancher/v2.5/en/logging/legacy/kafka/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Kafka -weight: 400 ---- - -If your organization uses [Kafka](https://kafka.apache.org/), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Kafka server to view logs. - ->**Prerequisite:** You must have a Kafka server configured. - -## Kafka Server Configuration - -1. Select the type of **Endpoint** your Kafka server is using: - - * **Zookeeper**: Enter the IP address and port. By default, Zookeeper uses port `2181`. Please note that a Zookeeper endpoint cannot enable TLS. - * **Broker**: Click on **Add Endpoint**. For each Kafka broker, enter the IP address and port. By default, Kafka brokers use port `9092`. - -1. 
In the **Topic** field, enter the name of a Kafka [topic](https://kafka.apache.org/documentation/#basic_ops_add_topic) that your Kubernetes cluster submits logs to. - -## **Broker** Endpoint Type - -### SSL Configuration - -If your Kafka cluster is using SSL for the **Broker**, you need to complete the **SSL Configuration** form. - -1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - -1. Provide the **CA Certificate PEM**. You can either copy and paste the certificate or upload it using the **Read from a file** button. - ->**Note:** Kafka does not support self-signed certificates when client authentication is enabled. - -### SASL configuration - -If your Kafka cluster is using [SASL authentication](https://kafka.apache.org/documentation/#security_sasl) for the Broker, you need to complete the **SASL Configuration** form. - -1. Enter the SASL **Username** and **Password**. - -1. Select the **SASL Type** that your Kafka cluster is using. - - * If your Kafka is using **Plain**, please ensure your Kafka cluster is using SSL. - - * If your Kafka is using **Scram**, you need to select which **Scram Mechanism** Kafka is using. diff --git a/content/rancher/v2.5/en/logging/legacy/splunk/_index.md b/content/rancher/v2.5/en/logging/legacy/splunk/_index.md deleted file mode 100644 index 67482f0aa3f..00000000000 --- a/content/rancher/v2.5/en/logging/legacy/splunk/_index.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Splunk -weight: 300 ---- - -If your organization uses [Splunk](https://www.splunk.com/), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Splunk server to view logs. - ->**Prerequisites:** -> ->- Configure HTTP event collection for your Splunk Server (Splunk Enterprise or Splunk Cloud). ->- Either create a new token or copy an existing token. 
-
->
->For more information, see [Splunk Documentation](http://docs.splunk.com/Documentation/Splunk/7.1.2/Data/UsetheHTTPEventCollector#About_Event_Collector_tokens). - -## Splunk Configuration - -1. In the **Endpoint** field, enter the IP address and port for your Splunk instance (i.e. `http://splunk-server:8088`) - - * Splunk usually uses port `8088`. If you're using Splunk Cloud, you'll need to work with [Splunk support](https://www.splunk.com/en_us/support-and-services.html) to get an endpoint URL. - -1. Enter the **Token** you obtained while completing the prerequisites (i.e., when you created a token in Splunk). - -1. In the **Source** field, enter the name of the token as entered in Splunk. - -1. **Optional:** Provide one or more [indexes](http://docs.splunk.com/Documentation/Splunk/7.1.2/Indexer/Aboutindexesandindexers) that are allowed for your token. - -## SSL Configuration - -If your instance of Splunk uses SSL, your **Endpoint** will need to begin with `https://`. With the correct endpoint, the **SSL Configuration** form is enabled and ready to be completed. - -1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - -1. Enter your **Client Key Password**. - -1. Select whether or not you want to verify your SSL. - - * If you are using a self-signed certificate, select **Enabled - Input trusted server certificate**, provide the **CA Certificate PEM**. You can copy and paste the certificate or upload it using the **Read from a file** button. 
- * If you are using a certificate from a certificate authority, select **Enabled - Input trusted server certificate**. You do not need to provide a **CA Certificate PEM**. - -## Viewing Logs - -1. Log into your Splunk server. - -1. Click on **Search & Reporting**. The number of **Indexed Events** listed should be increasing. - -1. Click on Data Summary and select the Sources tab. - ![View Logs]({{}}/img/rancher/splunk/splunk4.jpg) - -1. To view the actual logs, click on the source that you declared earlier. - ![View Logs]({{}}/img/rancher/splunk/splunk5.jpg) - -## Troubleshooting - -You can use curl to see if **HEC** is listening for HTTP event data. - -``` -$ curl http://splunk-server:8088/services/collector/event \ - -H 'Authorization: Splunk 8da70994-b1b0-4a79-b154-bfaae8f93432' \ - -d '{"event": "hello world"}' -``` - -If Splunk is configured correctly, you should receive **json** data returning `success code 0`. You should be able -to send logging data to HEC. - -If you received an error, check your configuration in Splunk and Rancher. diff --git a/content/rancher/v2.5/en/logging/legacy/syslog/_index.md b/content/rancher/v2.5/en/logging/legacy/syslog/_index.md deleted file mode 100644 index aa997ca949a..00000000000 --- a/content/rancher/v2.5/en/logging/legacy/syslog/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Syslog -weight: 500 ---- - -If your organization uses [Syslog](https://tools.ietf.org/html/rfc5424), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Syslog server to view logs. - ->**Prerequisite:** You must have a Syslog server configured. - -If you are using rsyslog, please make sure your rsyslog authentication mode is `x509/name`. - -## Syslog Server Configuration - -1. In the **Endpoint** field, enter the IP address and port for your Syslog server. Additionally, in the dropdown, select the protocol that your Syslog server uses. - -1. 
In the **Program** field, enter the name of the application sending logs to your Syslog server, e.g. `Rancher`. - -1. If you are using a cloud logging service, e.g. [Sumologic](https://www.sumologic.com/), enter a **Token** that authenticates with your Syslog server. You will need to create this token in the cloud logging service. - -1. Select a **Log Severity** for events that are logged to the Syslog server. For more information on each severity level, see the [Syslog protocol documentation](https://tools.ietf.org/html/rfc5424#page-11). - - - Specifying a **Log Severity** does not mean that it will act as a filtering mechanism for logs. To filter logs, you should use a parser on the Syslog server. - -## Encryption Configuration - -If your Syslog server is using **TCP** protocol and uses TLS, you need to select **Use TLS** and complete the **Encryption Configuration** form. - -1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - -1. Select whether or not you want to verify your SSL. - - * If you are using a self-signed certificate, select **Enabled - Input trusted server certificate**, provide the **CA Certificate PEM**. You can copy and paste the certificate or upload it using the **Read from a file** button. - * If you are using a certificate from a certificate authority, select **Enabled - Input trusted server certificate**. You do not need to provide a **CA Certificate PEM**. 
diff --git a/content/rancher/v2.5/en/longhorn-storage/_index.md b/content/rancher/v2.5/en/longhorn-storage/_index.md deleted file mode 100644 index 6fa8ec6a750..00000000000 --- a/content/rancher/v2.5/en/longhorn-storage/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Longhorn Storage -weight: 14 ---- - -> This page is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/longhorn-storage/legacy/_index.md b/content/rancher/v2.5/en/longhorn-storage/legacy/_index.md deleted file mode 100644 index 1629da37c2d..00000000000 --- a/content/rancher/v2.5/en/longhorn-storage/legacy/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Legacy UI Docs -weight: 2 ---- \ No newline at end of file diff --git a/content/rancher/v2.5/en/managing-applications/_index.md b/content/rancher/v2.5/en/managing-applications/_index.md deleted file mode 100644 index 808c3bf7ea6..00000000000 --- a/content/rancher/v2.5/en/managing-applications/_index.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Managing Applications -description: Rancher enables the use of catalogs to repeatedly deploy applications easily. Catalogs are GitHub or Helm Chart repositories filled with deployment-ready apps. -weight: 7 ---- - -> This section is under construction. - -Rancher provides the ability to use a catalog of Helm charts that make it easy to repeatedly deploy applications. - -- **Catalogs** are GitHub repositories or Helm Chart repositories filled with applications that are ready-made for deployment. Applications are bundled in objects called _Helm charts_. -- **Helm charts** are a collection of files that describe a related set of Kubernetes resources. A single chart might be used to deploy something simple, like a memcached pod, or something complex, like a full web app stack with HTTP servers, databases, caches, and so on. - -Rancher improves on Helm catalogs and charts. 
All native Helm charts can work within Rancher, but Rancher adds several enhancements to improve their user experience. - -This section covers the following topics: - -- [Catalog scopes](#catalog-scopes) -- [Catalog Helm Deployment Versions](#catalog-helm-deployment-versions) -- [When to use Helm 3](#when-to-use-helm-3) -- [Helm 3 Backwards Compatibility](#helm-3-backwards-compatibility) -- [Built-in global catalogs](#built-in-global-catalogs) -- [Custom catalogs](#custom-catalogs) -- [Creating and launching applications](#creating-and-launching-applications) -- [Chart compatibility with Rancher](#chart-compatibility-with-rancher) -- [Global DNS](#global-dns) - -# Catalog Scopes - -Within Rancher, you can manage catalogs at three different scopes. Global catalogs are shared across all clusters and projects. There are some use cases where you might not want to share catalogs between different clusters or even projects in the same cluster. By leveraging cluster and project scoped catalogs, you will be able to provide applications for specific teams without needing to share them with all clusters and/or projects. - -Scope | Description | Available As of | ---- | --- | --- | -Global | All clusters and all projects can access the Helm charts in this catalog | v2.0.0 | -Cluster | All projects in the specific cluster can access the Helm charts in this catalog | v2.2.0 | -Project | This specific project can access the Helm charts in this catalog | v2.2.0 | - -# Catalog Helm Deployment Versions - -_Applicable as of v2.4.0_ - -In November 2019, Helm 3 was released, and some features were deprecated or refactored. It is not fully [backwards compatible]({{}}/rancher/v2.x/en/catalog#helm-3-backwards-compatibility) with Helm 2. Therefore, catalogs in Rancher need to be separated, with each catalog only using one Helm version. 
This will help reduce app deployment issues as your Rancher users will not need to know which version of your chart is compatible with which Helm version - they can just select a catalog, select an app and deploy a version that has already been vetted for compatibility. - -When you create a custom catalog, you will have to configure the catalog to use either Helm 2 or Helm 3. This version cannot be changed later. If the catalog is added with the wrong Helm version, it will need to be deleted and re-added. - -When you launch a new app from a catalog, the app will be managed by the catalog's Helm version. A Helm 2 catalog will use Helm 2 to manage all of the apps, and a Helm 3 catalog will use Helm 3 to manage all apps. - -By default, catalogs are assumed to be deployed using Helm 2. If you run an app in Rancher prior to v2.4.0, then upgrade to Rancher v2.4.0+, the app will still be managed by Helm 2. If the app was already using a Helm 3 Chart (API version 2) it will no longer work in v2.4.0+. You must either downgrade the chart's API version or recreate the catalog to use Helm 3. - -Charts that are specific to Helm 2 should only be added to a Helm 2 catalog, and Helm 3 specific charts should only be added to a Helm 3 catalog. - -# When to use Helm 3 - -_Applicable as of v2.4.0_ - -- If you want to ensure that the security permissions are being pulled from the kubeconfig file -- If you want to utilize apiVersion `v2` features such as creating a library chart to reduce code duplication, or moving your requirements from the `requirements.yaml` into the `Chart.yaml` - -Overall Helm 3 is a movement towards a more standardized Kubernetes feel. As the Kubernetes community has evolved, standards and best practices have as well. Helm 3 is an attempt to adopt those practices and streamline how charts are maintained. 
- -# Helm 3 Backwards Compatibility - -_Applicable as of v2.4.0_ - -With the use of the OpenAPI schema to validate your rendered templates in Helm 3, you will find charts that worked in Helm 2 may not work in Helm 3. This will require you to update your chart templates to meet the new validation requirements. This is one of the main reasons support for Helm 2 and Helm 3 was provided starting in Rancher 2.4.x, as not all charts can be deployed immediately in Helm 3. - -Helm 3 does not create a namespace for you, so you will have to provide an existing one. This can cause issues if you have integrated code with Helm 2, as you will need to make code changes to ensure a namespace is being created and passed in for Helm 3. Rancher will continue to manage namespaces for Helm to ensure this does not impact your app deployment. - -apiVersion `v2` is now reserved for Helm 3 charts. This apiVersion enforcement could cause issues as older versions of Helm 2 did not validate the apiVersion in the `Chart.yaml` file. In general, your Helm 2 chart’s apiVersion should be set to `v1` and your Helm 3 chart’s apiVersion should be set to `v2`. You can install charts with apiVersion `v1` with Helm 3, but you cannot install `v2` charts into Helm 2. - -# Built-in Global Catalogs - -Within Rancher, there are default catalogs packaged as part of Rancher. These can be enabled or disabled by an administrator. For details, refer to the section on managing [built-in global catalogs.]({{}}/rancher/v2.x/en/catalog/built-in) - -# Custom Catalogs - -There are two types of catalogs in Rancher: [Built-in global catalogs]({{}}/rancher/v2.x/en/catalog/built-in/) and [custom catalogs.]({{}}/rancher/v2.x/en/catalog/adding-catalogs/) - -Any user can create custom catalogs to add into Rancher. Custom catalogs can be added into Rancher at the global level, cluster level, or project level. 
For details, refer to the [section on adding custom catalogs]({{}}/rancher/v2.x/en/catalog/adding-catalogs) and the [catalog configuration reference.]({{}}/rancher/v2.x/en/catalog/catalog-config) - -# Creating and Launching Applications - -In Rancher, applications are deployed from the templates in a catalog. This section covers the following topics: - -* [Multi-cluster applications]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) -* [Creating catalog apps]({{}}/rancher/v2.x/en/catalog/creating-apps) -* [Launching catalog apps within a project]({{}}/rancher/v2.x/en/catalog/launching-apps) -* [Managing catalog apps]({{}}/rancher/v2.x/en/catalog/managing-apps) -* [Tutorial: Example custom chart creation]({{}}/rancher/v2.x/en/catalog/tutorial) - -# Chart Compatibility with Rancher - -Charts now support the fields `rancher_min_version` and `rancher_max_version` in the [`questions.yml` file](https://github.com/rancher/integration-test-charts/blob/master/charts/chartmuseum/v1.6.0/questions.yml) to specify the versions of Rancher that the chart is compatible with. When using the UI, only app versions that are valid for the version of Rancher running will be shown. API validation is done to ensure apps that don't meet the Rancher requirements cannot be launched. An app that is already running will not be affected on a Rancher upgrade if the newer Rancher version does not meet the app's requirements. - -# Global DNS - -_Available as of v2.2.0_ - -When creating applications that span multiple Kubernetes clusters, a Global DNS entry can be created to route traffic to the endpoints in all of the different clusters. An external DNS server will need to be programmed to assign a fully qualified domain name (a.k.a FQDN) to your application. Rancher will use the FQDN you provide and the IP addresses where your application is running to program the DNS. Rancher will gather endpoints from all the Kubernetes clusters running your application and program the DNS. 
- -For more information on how to use this feature, see [Global DNS]({{}}/rancher/v2.x/en/catalog/globaldns/). diff --git a/content/rancher/v2.5/en/managing-applications/creating-apps/_index.md b/content/rancher/v2.5/en/managing-applications/creating-apps/_index.md deleted file mode 100644 index 6c6876d4f96..00000000000 --- a/content/rancher/v2.5/en/managing-applications/creating-apps/_index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Creating Library Apps -weight: 4 ---- - -> This page is under construction. - -Rancher's catalog service requires any custom catalogs to be structured in a specific format for the catalog service to be able to leverage it in Rancher. - -> For a complete walkthrough of developing charts, see the [Chart Template Developer's Guide](https://helm.sh/docs/chart_template_guide/) in the official Helm documentation. - -- [Chart types](#chart-types) - - [Helm charts](#helm-charts) - - [Rancher charts](#rancher-charts) -- [Chart directory structure](#chart-directory-structure) -- [Additional Files for Rancher Charts](#additional-files-for-rancher-charts) - - [questions.yml](#questions-yml) - - [Min/Max Rancher versions](#min-max-rancher-versions) - - [Question variable reference](#question-variable-reference) -- [Tutorial: Example Custom Chart Creation](#tutorial-example-custom-chart-creation) - -# Chart Types - -Rancher supports two different types of charts: Helm charts and Rancher charts. - -### Helm Charts - -Native Helm charts include an application along with other software required to run it. When deploying native Helm charts, you'll learn the chart's parameters and then configure them using **Answers**, which are sets of key value pairs. - -The Helm Stable and Helm Incubators are populated with native Helm charts. However, you can also use native Helm charts in Custom catalogs (although we recommend Rancher Charts). 
- -### Rancher Charts - -Rancher charts mirror native helm charts, although they add two files that enhance user experience: `app-readme.md` and `questions.yaml`. Read more about them in [Additional Files for Rancher Charts.](#additional-files-for-rancher-charts) - -Advantages of Rancher charts include: - -- **Enhanced revision tracking:** While Helm supports versioned deployments, Rancher adds tracking and revision history to display changes between different versions of the chart. -- **Streamlined application launch:** Rancher charts add simplified chart descriptions and configuration forms to make catalog application deployment easy. Rancher users need not read through the entire list of Helm variables to understand how to launch an application. -- **Application resource management:** Rancher tracks all the resources created by a specific application. Users can easily navigate to and troubleshoot on a page listing all the workload objects used to power an application. - -# Chart Directory Structure - -The following table demonstrates the directory structure for a chart, which can be found in a chart directory: `charts///`. This information is helpful when customizing charts for a custom catalog. Files denoted with **Rancher Specific** are specific to Rancher charts, but are optional for chart customization. - -``` -charts/// -|--charts/ # Directory containing dependency charts. -|--templates/ # Directory containing templates that, when combined with values.yml, generates Kubernetes YAML. -|--app-readme.md # Text displayed in the charts header within the Rancher UI.* -|--Chart.yml # Required Helm chart information file. -|--questions.yml # Form questions displayed within the Rancher UI. Questions display in Configuration Options.* -|--README.md # Optional: Helm Readme file displayed within Rancher UI. This text displays in Detailed Descriptions. -|--requirements.yml # Optional: YAML file listing dependencies for the chart. 
-|--values.yml # Default configuration values for the chart. -``` - -# Additional Files for Rancher Charts - -Before you create your own custom catalog, you should have a basic understanding about how a Rancher chart differs from a native Helm chart. Rancher charts differ slightly from Helm charts in their directory structures. Rancher charts include two files that Helm charts do not. - -- `app-readme.md` - - A file that provides descriptive text in the chart's UI header. The following image displays the difference between a Rancher chart (which includes `app-readme.md`) and a native Helm chart (which does not). - -
Rancher Chart with app-readme.md (left) vs. Helm Chart without (right)
- - ![app-readme.md]({{}}/img/rancher/app-readme.png) - -- `questions.yml` - - A file that contains questions for a form. These form questions simplify deployment of a chart. Without it, you must configure the deployment using key value pairs, which is more difficult. The following image displays the difference between a Rancher chart (which includes `questions.yml`) and a native Helm chart (which does not). - - -
Rancher Chart with questions.yml (left) vs. Helm Chart without (right)
- - ![questions.yml]({{}}/img/rancher/questions.png) - - -### questions.yml - -Inside the `questions.yml`, most of the content will be around the questions to ask the end user, but there are some additional fields that can be set in this file. - -### Min/Max Rancher versions - -For each chart, you can add the minimum and/or maximum Rancher version, which determines whether or not this chart is available to be deployed from Rancher. - -> **Note:** Even though Rancher release versions are prefixed with a `v`, there is *no* prefix for the release version when using this option. - -``` -rancher_min_version: 2.3.0 -rancher_max_version: 2.3.99 -``` - -### Question Variable Reference - -This reference contains variables that you can use in `questions.yml` nested under `questions:`. - -| Variable | Type | Required | Description | -| ------------- | ------------- | --- |------------- | -| variable | string | true | Define the variable name specified in the `values.yml` file, using `foo.bar` for nested objects. | -| label | string | true | Define the UI label. | -| description | string | false | Specify the description of the variable.| -| type | string | false | Default to `string` if not specified (current supported types are string, multiline, boolean, int, enum, password, storageclass, hostname, pvc, and secret).| -| required | bool | false | Define if the variable is required or not (true \| false)| -| default | string | false | Specify the default value. | -| group | string | false | Group questions by input value. | -| min_length | int | false | Min character length.| -| max_length | int | false | Max character length.| -| min | int | false | Min integer length. | -| max | int | false | Max integer length. | -| options | []string | false | Specify the options when the variable type is `enum`, for example: options:
- "ClusterIP"
- "NodePort"
- "LoadBalancer"| -| valid_chars | string | false | Regular expression for input chars validation. | -| invalid_chars | string | false | Regular expression for invalid input chars validation.| -| subquestions | []subquestion | false| Add an array of subquestions.| -| show_if | string | false | Show current variable if conditional variable is true. For example `show_if: "serviceType=Nodeport"` | -| show\_subquestion_if | string | false | Show subquestions if is true or equal to one of the options. for example `show_subquestion_if: "true"`| - ->**Note:** `subquestions[]` cannot contain `subquestions` or `show_subquestions_if` keys, but all other keys in the above table are supported. - -# Tutorial: Example Custom Chart Creation - -For a tutorial on adding a custom Helm chart to a custom catalog, refer to [this page.]({{}}/rancher/v2.x/en/catalog/tutorial) diff --git a/content/rancher/v2.5/en/managing-applications/creating-custom-libraries/_index.md b/content/rancher/v2.5/en/managing-applications/creating-custom-libraries/_index.md deleted file mode 100644 index 32533d3a1fa..00000000000 --- a/content/rancher/v2.5/en/managing-applications/creating-custom-libraries/_index.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Creating Custom Libraries -weight: 2 ---- - -> This page is under construction. - -Custom catalogs can be added into Rancher at a global scope, cluster scope, or project scope. 
- -- [Adding catalog repositories](#adding-catalog-repositories) - - [Add custom Git repositories](#add-custom-git-repositories) - - [Add custom Helm chart repositories](#add-custom-helm-chart-repositories) - - [Add private Git/Helm chart repositories](#add-private-git-helm-chart-repositories) -- [Adding global catalogs](#adding-global-catalogs) -- [Adding cluster level catalogs](#adding-cluster-level-catalogs) -- [Adding project level catalogs](#adding-project-level-catalogs) -- [Custom catalog configuration reference](#custom-catalog-configuration-reference) - -# Adding Catalog Repositories - -Adding a catalog is as simple as adding a catalog name, a URL and a branch name. - -**Prerequisite:** An [admin]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) of Rancher has the ability to add or remove catalogs globally in Rancher. - -### Add Custom Git Repositories -The Git URL needs to be one that `git clone` [can handle](https://git-scm.com/docs/git-clone#_git_urls_a_id_urls_a) and must end in `.git`. The branch name must be a branch that is in your catalog URL. If no branch name is provided, it will use the `master` branch by default. Whenever you add a catalog to Rancher, it will be available immediately. - -### Add Custom Helm Chart Repositories - -A Helm chart repository is an HTTP server that houses one or more packaged charts. Any HTTP server that can serve YAML files and tar files and can answer GET requests can be used as a repository server. - -Helm comes with built-in package server for developer testing (helm serve). The Helm team has tested other servers, including Google Cloud Storage with website mode enabled, S3 with website mode enabled or hosting custom chart repository server using open-source projects like [ChartMuseum](https://github.com/helm/chartmuseum). - -In Rancher, you can add the custom Helm chart repository with only a catalog name and the URL address of the chart repository. 
- -### Add Private Git/Helm Chart Repositories - -Private catalog repositories can be added using credentials like Username and Password. You may also want to use the OAuth token if your Git or Helm repository server supports that. - -For more information on private Git/Helm catalogs, refer to the [custom catalog configuration reference.]({{}}/rancher/v2.x/en/catalog/catalog-config) - - 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions prior to v2.2.0, you can select **Catalogs** directly in the navigation bar. - 2. Click **Add Catalog**. - 3. Complete the form and click **Create**. - - **Result:** Your catalog is added to Rancher. - -# Adding Global Catalogs - ->**Prerequisites:** In order to manage the [built-in catalogs]({{}}/rancher/v2.x/en/catalog/built-in/) or manage global catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. - - 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions prior to v2.2.0, you can select **Catalogs** directly in the navigation bar. - 2. Click **Add Catalog**. - 3. Complete the form. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.x/en/catalog/#catalog-helm-deployment-versions) -4. Click **Create**. - - **Result**: Your custom global catalog is added to Rancher. 
Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [multi-cluster apps]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or [applications in any project]({{}}/rancher/v2.x/en/catalog/launching-apps/) from this catalog. - -# Adding Cluster Level Catalogs - ->**Prerequisites:** In order to manage cluster scoped catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Cluster Owner Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) ->- [Custom Cluster Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Cluster Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-role-reference) role assigned. - -1. From the **Global** view, navigate to your cluster that you want to start adding custom catalogs. -2. Choose the **Tools > Catalogs** in the navigation bar. -2. Click **Add Catalog**. -3. Complete the form. By default, the form will provide the ability to select `Scope` of the catalog. When you have added a catalog from the **Cluster** scope, it is defaulted to `Cluster`. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.x/en/catalog/#catalog-helm-deployment-versions) -5. Click **Create**. - -**Result**: Your custom cluster catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in any project in that cluster]({{}}/rancher/v2.x/en/catalog/apps/) from this catalog. 
- -# Adding Project Level Catalogs - ->**Prerequisites:** In order to manage project scoped catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Cluster Owner Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) ->- [Project Owner Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) ->- [Custom Project Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Project Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) role assigned. - -1. From the **Global** view, navigate to your project that you want to start adding custom catalogs. -2. Choose the **Tools > Catalogs** in the navigation bar. -2. Click **Add Catalog**. -3. Complete the form. By default, the form will provide the ability to select `Scope` of the catalog. When you have added a catalog from the **Project** scope, it is defaulted to `Cluster`. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.x/en/catalog/#catalog-helm-deployment-versions) -5. Click **Create**. - -**Result**: Your custom project catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in that project]({{}}/rancher/v2.x/en/catalog/apps/) from this catalog. - -# Custom Catalog Configuration Reference - -Refer to [this page]({{}}/rancher/v2.x/en/catalog/catalog-config) more information on configuring custom catalogs. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/managing-applications/enabling-builtin-libraries/_index.md b/content/rancher/v2.5/en/managing-applications/enabling-builtin-libraries/_index.md deleted file mode 100644 index a162db9daff..00000000000 --- a/content/rancher/v2.5/en/managing-applications/enabling-builtin-libraries/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Enabling and Disabling Built-in Global Libraries -weight: 100 ---- - -> This page is under construction. - -There are default global catalogs packaged as part of Rancher. - -Within Rancher, there are default catalogs packaged as part of Rancher. These can be enabled or disabled by an administrator. - ->**Prerequisites:** In order to manage the built-in catalogs or manage global catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions-reference) role assigned. - -1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions prior to v2.2.0, you can select **Catalogs** directly in the navigation bar. - -2. Toggle the default catalogs that you want to be enabled or disabled: - - - **Library:** The Library Catalog includes charts curated by Rancher. Rancher stores charts in a Git repository to expedite the fetch and update of charts. This catalog features Rancher Charts, which include some [notable advantages]({{}}/rancher/v2.x/en/catalog/creating-apps/#rancher-charts) over native Helm charts. - - **Helm Stable:** This catalog, which is maintained by the Kubernetes community, includes native [Helm charts](https://helm.sh/docs/chart_template_guide/). This catalog features the largest pool of apps. 
- - **Helm Incubator:** Similar in user experience to Helm Stable, but this catalog is filled with applications in **beta**. - - **Result**: The chosen catalogs are enabled. Wait a few minutes for Rancher to replicate the catalog charts. When replication completes, you'll be able to see them in any of your projects by selecting **Apps** from the main navigation bar. In versions prior to v2.2.0, within a project, you can select **Catalog Apps** from the main navigation bar. diff --git a/content/rancher/v2.5/en/managing-applications/launching-apps/_index.md b/content/rancher/v2.5/en/managing-applications/launching-apps/_index.md deleted file mode 100644 index 7b411b55145..00000000000 --- a/content/rancher/v2.5/en/managing-applications/launching-apps/_index.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Launching Library Apps -weight: 7 ---- - -> This page is under construction. - -Within a project, when you want to deploy applications from catalogs, the applications available in your project will be based on the [scope of the catalogs]({{}}/rancher/v2.x/en/catalog/#catalog-scope). - -If your application is using ingresses, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{}}/rancher/v2.x/en/catalog/globaldns/). - -- [Prerequisites](#prerequisites) -- [Launching a catalog app](#launching-a-catalog-app) -- [Configuration options](#configuration-options) - -# Prerequisites - -When Rancher deploys a catalog app, it launches an ephemeral instance of a Helm service account that has the permissions of the user deploying the catalog app. Therefore, a user cannot gain more access to the cluster through Helm or a catalog application than they otherwise would have. 
- -To launch an app from a catalog in Rancher, you must have at least one of the following permissions: - -- A [project-member role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) in the target cluster, which gives you the ability to create, read, update, and delete the workloads -- A [cluster owner role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) for the cluster that include the target project - -Before launching an app, you'll need to either [enable a built-in global catalog]({{}}/rancher/v2.x/en/catalog/built-in) or [add your own custom catalog.]({{}}/rancher/v2.x/en/catalog/adding-catalogs) - -# Launching a Catalog App - -1. From the **Global** view, open the project that you want to deploy an app to. - -2. From the main navigation bar, choose **Apps**. In versions prior to v2.2.0, choose **Catalog Apps** on the main navigation bar. Click **Launch**. - -3. Find the app that you want to launch, and then click **View Now**. - -4. Under **Configuration Options** enter a **Name**. By default, this name is also used to create a Kubernetes namespace for the application. - - * If you would like to change the **Namespace**, click **Customize** and enter a new name. - * If you want to use a different namespace that already exists, click **Customize**, and then click **Use an existing namespace**. Choose a namespace from the list. - -5. Select a **Template Version**. - -6. Complete the rest of the **Configuration Options**. - - * For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs), answers are provided as key value pairs in the **Answers** section. - * Keys and values are available within **Detailed Descriptions**. 
- * When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of --set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. For example, when entering an answer that includes two values separated by a comma (i.e., `abc, bcd`), wrap the values with double quotes (i.e., `"abc, bcd"`). - -7. Review the files in **Preview**. When you're satisfied, click **Launch**. - -**Result**: Your application is deployed to your chosen namespace. You can view the application status from the project's **Workloads** view or **Apps** view. In versions prior to v2.2.0, this is the **Catalog Apps** view. - -# Configuration Options - -For each Helm chart, there are a list of desired answers that must be entered in order to successfully deploy the chart. When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of –set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. - -> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). - -{{% tabs %}} -{{% tab "UI" %}} - -### Using a questions.yml file - -If the Helm chart that you are deploying contains a `questions.yml` file, Rancher's UI will translate this file to display an easy to use UI to collect the answers for the questions. - -### Key Value Pairs for Native Helm Charts - -For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{}}/rancher/v2.x/en/catalog/custom/#custom-helm-chart-repository)), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. 
- -{{% /tab %}} -{{% tab "Editing YAML Files" %}} - -If you do not want to input answers using the UI, you can choose the **Edit as YAML** option. - -With this example YAML: - -```YAML -outer: - inner: value -servers: -- port: 80 - host: example -``` - -### Key Value Pairs - -You can have a YAML file that translates these fields to match how to [format custom values so that it can be used with `--set`](https://github.com/helm/helm/blob/master/docs/using_helm.md#the-format-and-limitations-of---set). - -These values would be translated to: - -``` -outer.inner=value -servers[0].port=80 -servers[0].host=example -``` - -### YAML files - -You can directly paste that YAML formatted structure into the YAML editor. By allowing custom values to be set using a YAML formatted structure, Rancher has the ability to easily customize for more complicated input values (e.g. multi-lines, array and JSON objects). -{{% /tab %}} -{{% /tabs %}} \ No newline at end of file diff --git a/content/rancher/v2.5/en/managing-applications/library-config/_index.md b/content/rancher/v2.5/en/managing-applications/library-config/_index.md deleted file mode 100644 index cf91b118956..00000000000 --- a/content/rancher/v2.5/en/managing-applications/library-config/_index.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Custom Library Configuration Reference -weight: 3 ---- - -> This page is under construction. - -Any user can create custom catalogs to add into Rancher. Besides the content of the catalog, users must ensure their catalogs are able to be added into Rancher. 
- -- [Types of Repositories](#types-of-repositories) -- [Custom Git Repository](#custom-git-repository) -- [Custom Helm Chart Repository](#custom-helm-chart-repository) -- [Catalog Fields](#catalog-fields) -- [Private Repositories](#private-repositories) - - [Using Username and Password](#using-username-and-password) - - [Using an OAuth token](#using-an-oauth-token) - -# Types of Repositories - -Rancher supports adding in different types of repositories as a catalog: - -* Custom Git Repository -* Custom Helm Chart Repository - -# Custom Git Repository - -The Git URL needs to be one that `git clone` [can handle](https://git-scm.com/docs/git-clone#_git_urls_a_id_urls_a) and must end in `.git`. The branch name must be a branch that is in your catalog URL. If no branch name is provided, it will default to use the `master` branch. Whenever you add a catalog to Rancher, it will be available almost immediately. - -# Custom Helm Chart Repository - -A Helm chart repository is an HTTP server that contains one or more packaged charts. Any HTTP server that can serve YAML files and tar files and can answer GET requests can be used as a repository server. - -Helm comes with a built-in package server for developer testing (`helm serve`). The Helm team has tested other servers, including Google Cloud Storage with website mode enabled, S3 with website mode enabled or hosting custom chart repository server using open-source projects like [ChartMuseum](https://github.com/helm/chartmuseum). - -In Rancher, you can add the custom Helm chart repository with only a catalog name and the URL address of the chart repository. 
- -# Catalog Fields - -When [adding your catalog]({{}}/rancher/v2.x/en/catalog/custom/adding/) to Rancher, you'll provide the following information: - - -| Variable | Description | -| -------------------- | ------------- | -| Name | Name for your custom catalog to distinguish the repositories in Rancher | -| Catalog URL | URL of your custom chart repository| -| Use Private Catalog | Selected if you are using a private repository that requires authentication | -| Username (Optional) | Username or OAuth Token | -| Password (Optional) | If you are authenticating using a username, enter the associated password. If you are using an OAuth token, use `x-oauth-basic`. | -| Branch | For a Git repository, the branch name. Default: `master`. For a Helm Chart repository, this field is ignored. | -| Helm version | The Helm version that will be used to deploy all of the charts in the catalog. This field cannot be changed later. For more information, refer to the [section on Helm versions.]({{}}/rancher/v2.x/en/catalog/#catalog-helm-deployment-versions) | - -# Private Repositories - -Private Git or Helm chart repositories can be added into Rancher using either credentials, i.e. `Username` and `Password`. Private Git repositories also support authentication using OAuth tokens. - -### Using Username and Password - -1. When [adding the catalog]({{}}/rancher/v2.x/en/catalog/custom/adding/), select the **Use private catalog** checkbox. - -2. Provide the `Username` and `Password` for your Git or Helm repository. - -### Using an OAuth token - -Read [using Git over HTTPS and OAuth](https://github.blog/2012-09-21-easier-builds-and-deployments-using-git-over-https-and-oauth/) for more details on how OAuth authentication works. - -1. Create an [OAuth token](https://github.com/settings/tokens) -with `repo` permission selected, and click **Generate token**. - -2. When [adding the catalog]({{}}/rancher/v2.x/en/catalog/custom/adding/), select the **Use private catalog** checkbox. - -3. 
For `Username`, provide the Git generated OAuth token. For `Password`, enter `x-oauth-basic`. diff --git a/content/rancher/v2.5/en/managing-applications/managing-apps/_index.md b/content/rancher/v2.5/en/managing-applications/managing-apps/_index.md deleted file mode 100644 index 32e05ca6ad4..00000000000 --- a/content/rancher/v2.5/en/managing-applications/managing-apps/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Managing Library Apps -weight: 5 ---- - -> This page is under construction. - -After deploying an application, one of the benefits of using an application versus individual workloads/resources is the ease of being able to manage many workloads/resources applications. Apps can be cloned, upgraded or rolled back. - -- [Cloning catalog applications](#cloning-catalog-applications) -- [Upgrading catalog applications](#upgrading-catalog-applications) -- [Rolling back catalog applications](#rolling-back-catalog-applications) -- [Deleting catalog application deployments](#deleting-catalog-application-deployments) - -### Cloning Catalog Applications - -After an application is deployed, you can easily clone it to use create another application with almost the same configuration. It saves you the work of manually filling in duplicate information. - -### Upgrading Catalog Applications - -After an application is deployed, you can easily upgrade to a different template version. - -1. From the **Global** view, navigate to the project that contains the catalog application that you want to upgrade. - -1. From the main navigation bar, choose **Apps**. In versions prior to v2.2.0, choose **Catalog Apps** on the main navigation bar. Click **Launch**. - -3. Find the application that you want to upgrade, and then click the ⋮ to find **Upgrade**. - -4. Select the **Template Version** that you want to deploy. - -5. (Optional) Update your **Configuration Options**. - -6. 
(Optional) Select whether or not you want to force the catalog application to be upgraded by checking the box for **Delete and recreate resources if needed during the upgrade**. - - > In Kubernetes, some fields are designed to be immutable or cannot be updated directly. As of v2.2.0, you can now force your catalog application to be updated regardless of these fields. This will cause the catalog apps to be deleted and resources to be re-created if needed during the upgrade. - -7. Review the files in the **Preview** section. When you're satisfied, click **Launch**. - -**Result**: Your application is updated. You can view the application status from the project's: - -- **Workloads** view -- **Apps** view. In versions prior to v2.2.0, this is the **Catalog Apps** view. - - -### Rolling Back Catalog Applications - -After an application has been upgraded, you can easily rollback to a different template version. - -1. From the **Global** view, navigate to the project that contains the catalog application that you want to upgrade. - -1. From the main navigation bar, choose **Apps**. In versions prior to v2.2.0, choose **Catalog Apps** on the main navigation bar. Click **Launch**. - -3. Find the application that you want to rollback, and then click the ⋮ to find **Rollback**. - -4. Select the **Revision** that you want to roll back to. By default, Rancher saves up to the last 10 revisions. - -5. (Optional) Select whether or not you want to force the catalog application to be upgraded by checking the box for **Delete and recreate resources if needed during the upgrade**. - - > In Kubernetes, some fields are designed to be immutable or cannot be updated directly. As of v2.2.0, you can now force your catalog application to be updated regardless of these fields. This will cause the catalog apps to be deleted and resources to be re-created if needed during the rollback. - -7. Click **Rollback**. - -**Result**: Your application is updated. 
You can view the application status from the project's: - -- **Workloads** view -- **Apps** view. In versions prior to v2.2.0, this is the **Catalog Apps** view. - -### Deleting Catalog Application Deployments - -As a safeguard to prevent you from unintentionally deleting other catalog applications that share a namespace, deleting catalog applications themselves does not delete the namespace they're assigned to. - -Therefore, if you want to delete both an app and the namespace that contains the app, you should remove the app and the namespace separately: - -1. Uninstall the app using the app's `uninstall` function. - -1. From the **Global** view, navigate to the project that contains the catalog application that you want to delete. - -1. From the main menu, choose **Namespaces**. - -1. Find the namespace running your catalog app. Select it and click **Delete**. - -**Result:** The catalog application deployment and its namespace are deleted. diff --git a/content/rancher/v2.5/en/managing-applications/multi-cluster-apps/_index.md b/content/rancher/v2.5/en/managing-applications/multi-cluster-apps/_index.md deleted file mode 100644 index c925c687233..00000000000 --- a/content/rancher/v2.5/en/managing-applications/multi-cluster-apps/_index.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: Multi-Cluster Apps -weight: 6 ---- - -> This page is under construction. - -Typically, most applications are deployed on a single Kubernetes cluster, but there will be times you might want to deploy multiple copies of the same application across different clusters and/or projects. In Rancher, a _multi-cluster application_, is an application deployed using a Helm chart across multiple clusters. With the ability to deploy the same application across multiple clusters, it avoids the repetition of the same action on each cluster, which could introduce user error during application configuration. 
With multi-cluster applications, you can customize to have the same configuration across all projects/clusters as well as have the ability to change the configuration based on your target project. Since multi-cluster application is considered a single application, it's easy to manage and maintain this application. - -Any Helm charts from a global catalog can be used to deploy and manage multi-cluster applications. - -After creating a multi-cluster application, you can program a [Global DNS entry]({{}}/rancher/v2.x/en/catalog/globaldns/) to make it easier to access the application. - -- [Prerequisites](#prerequisites) -- [Launching a multi-cluster app](#launching-a-multi-cluster-app) -- [Multi-cluster app configuration options](#multi-cluster-app-configuration-options) - - [Targets](#targets) - - [Upgrades](#upgrades) - - [Roles](#roles) -- [Application configuration options](#application-configuration-options) - - [Using a questions.yml file](#using-a-questions-yml-file) - - [Key value pairs for native Helm charts](key-value-pairs-for-native-helm-charts) - - [Members](#members) - - [Overriding application configuration options for specific projects](#overriding-application-configuration-options-for-specific-projects) -- [Upgrading multi-cluster app roles and projects](#upgrading-multi-cluster-app-roles-and-projects) -- [Multi-cluster application management](#multi-cluster-application-managements) -- [Deleting a multi-cluster application](#deleting-a-multi-cluster-application) - -# Prerequisites - -To create a multi-cluster app in Rancher, you must have at least one of the following permissions: - -- A [project-member role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) in the target cluster(s), which gives you the ability to create, read, update, and delete the workloads -- A [cluster owner role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) for the clusters(s) that include the target project(s) - -# 
Launching a Multi-Cluster App - -1. From the **Global** view, choose **Apps** in the navigation bar. Click **Launch**. - -2. Find the application that you want to launch, and then click **View Details**. - -3. (Optional) Review the detailed descriptions, which are derived from the Helm chart's `README`. - -4. Under **Configuration Options** enter a **Name** for the multi-cluster application. By default, this name is also used to create a Kubernetes namespace in each [target project](#targets) for the multi-cluster application. The namespace is named as `-`. - -5. Select a **Template Version**. - -6. Complete the [multi-cluster applications specific configuration options](#multi-cluster-app-configuration-options) as well as the [application configuration options](#application-configuration-options). - -7. Select the **Members** who can [interact with the multi-cluster application](#members). - -8. Add any [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects) that would change the configuration for specific project(s) from the default application configuration answers. - -7. Review the files in the **Preview** section. When you're satisfied, click **Launch**. - -**Result**: Your application is deployed to your chosen namespace. You can view the application status from the project's: - -# Multi-cluster App Configuration Options - -Rancher has divided the configuration option for the multi-cluster application into several sections. - -### Targets - -In the **Targets** section, select the projects that you want the application to be deployed in. The list of projects is based on what projects you have access to. For each project that you select, it will be added to the list, which shows the cluster name and project name that were selected. To remove a target project, click on **-**. - -### Upgrades - -In the **Upgrades** section, select the upgrade strategy to use, when you decide to upgrade your application. 
- -* **Rolling Update (batched):** When selecting this upgrade strategy, the number of applications upgraded at a time is based on the selected **Batch size** and the **Interval** specifies how many seconds to wait before starting the next batch of updates. - -* **Upgrade all apps simultaneously:** When selecting this upgrade strategy, all applications across all projects will be upgraded at the same time. - -### Roles - -In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications]({{}}/rancher/v2.x/en/catalog/launching-apps), that specific user's permissions are used for creation of all workloads/resources that is required by the app. - -For multi-cluster applications, the application is deployed by a _system user_ and is assigned as the creator of all underlying resources. A _system user_ is used instead of the actual user due to the fact that the actual user could be removed from one of the target projects. If the actual user was removed from one of the projects, then that user would no longer be able to manage the application for the other projects. - -Rancher will let you select from two options for Roles, **Project** and **Cluster**. Rancher will allow creation using any of these roles based on the user's permissions. - -- **Project** - This is the equivalent of a [project member]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. 
While the user might not be explicitly granted the _project member_ role, if the user is an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), a [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or a [project owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles), then the user is considered to have the appropriate level of permissions. - -- **Cluster** - This is the equivalent of a [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), then the user is considered to have the appropriate level of permissions. - -When launching the application, Rancher will confirm if you have these permissions in the target projects before launching the application. - -> **Note:** There are some applications like _Grafana_ or _Datadog_ that require access to specific cluster-scoped resources. These applications will require the _Cluster_ role. If you find out later that the application requires cluster roles, the multi-cluster application can be upgraded to update the roles. - -# Application Configuration Options - -For each Helm chart, there are a list of desired answers that must be entered in order to successfully deploy the chart. When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of –set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. 
- -> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). - -### Using a questions.yml file - -If the Helm chart that you are deploying contains a `questions.yml` file, Rancher's UI will translate this file to display an easy to use UI to collect the answers for the questions. - -### Key Value Pairs for Native Helm Charts - -For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{}}/rancher/v2.x/en/catalog/custom/#custom-helm-chart-repository)), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. - -### Members - -By default, multi-cluster applications can only be managed by the user who created it. In the **Members** section, other users can be added so that they can also help manage or view the multi-cluster application. - -1. Find the user that you want to add by typing in the member's name in the **Member** search box. - -2. Select the **Access Type** for that member. There are three access types for a multi-cluster project, but due to how the permissions of a multi-cluster application are launched, please read carefully to understand what these access types mean. - - - **Owner**: This access type can manage any configuration part of the multi-cluster application including the template version, the [multi-cluster applications specific configuration options](#Multi-cluster App Configuration Options), the [application specific configuration options](#application-configuration-options), the members who can interact with the multi-cluster application and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). 
Since a multi-cluster application is created with a different set of permissions from the user, any _owner_ of the multi-cluster application can manage/remove applications in [target projects](#targets) without explicitly having access to these project(s). Only trusted users should be provided with this access type. - - - **Member**: This access type can only modify the template version, the [application specific configuration options](#application-configuration-options) and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _member_ of the multi-cluster application can modify the application without explicitly having access to these project(s). Only trusted users should be provided with this access type. - - - **Read-only**: This access type cannot modify any configuration option for the multi-cluster application. Users can only view these applications. - - > **Note:** Please ensure only trusted users are given _Owner_ or _Member_ access as they will automatically be able to manage applications created for this multi-cluster application in target projects they might not have direct access to. - -### Overriding Application Configuration Options for Specific Projects - -The ability to use the same configuration to deploy the same application across multiple clusters/projects is one of the main benefits of multi-cluster applications. There might be a specific project that requires a slightly different configuration option, but you want to manage that application with all the other matching applications. Instead of creating a brand new application, you can override specific [application specific configuration options](#application-configuration-options) for specific projects. - -1. In the **Answer Overrides** section, click **Add Override**. - -2. 
For each override, you can select the following: - - - **Scope**: Select which target projects you want to override the answer in the configuration option. - - - **Question**: Select which question you want to override. - - - **Answer**: Enter the answer that you want to be used instead. - -# Upgrading Multi-Cluster App Roles and Projects - -- **Changing Roles on an existing Multi-Cluster app** -The creator and any users added with the access-type "owner" to a multi-cluster app, can upgrade its Roles. When adding a new Role, we check if the user has that exact role in all current target projects. These checks allow the same relaxations for global admins, cluster owners and project-owners as described in the installation section for the field `Roles`. - -- **Adding/Removing target projects** -1. The creator and any users added with access-type "owner" to a multi-cluster app, can add or remove its target projects. When adding a new project, we check if the caller of this request has all Roles defined on multi-cluster app, in the new projects they want to add. The roles checks are again relaxed for global admins, cluster-owners and project-owners. -2. We do not do these membership checks when removing target projects. This is because the caller's permissions could have with respect to the target project, or the project could have been deleted and hence the caller wants to remove it from targets list. - - -# Multi-Cluster Application Management - -One of the benefits of using a multi-cluster application as opposed to multiple individual applications of the same type, is the ease of management. Multi-cluster applications can be cloned, upgraded or rolled back. - -1. From the **Global** view, choose **Apps** in the navigation bar. - -2. Choose the multi-cluster application you want to take one of these actions on and click the **⋮**. Select one of the following options: - - * **Clone**: Creates another multi-cluster application with the same configuration. 
By using this option, you can easily duplicate a multi-cluster application. - * **Upgrade**: Upgrade your multi-cluster application to change some part of the configuration. When performing an upgrade for a multi-cluster application, the [upgrade strategy](#upgrades) can be modified if you have the correct [access type](#members). - * **Rollback**: Rollback your application to a specific version. If after an upgrade, there are issues for your multi-cluster application for one or more of your [targets](#targets), Rancher has stored up to 10 versions of the multi-cluster application. Rolling back a multi-cluster application reverts the application for **all** target clusters and projects, not just the target(s) affected by the upgrade issue. - -# Deleting a Multi-Cluster Application - -1. From the **Global** view, choose **Apps** in the navigation bar. - -2. Choose the multi-cluster application you want to delete and click the **⋮ > Delete**. When deleting the multi-cluster application, all applications and namespaces are deleted in all of the target projects. - - > **Note:** The applications in the target projects, that are created for a multi-cluster application, cannot be deleted individually. The applications can only be deleted when the multi-cluster application is deleted. diff --git a/content/rancher/v2.5/en/managing-applications/tutorial/_index.md b/content/rancher/v2.5/en/managing-applications/tutorial/_index.md deleted file mode 100644 index 2ed509e1a6a..00000000000 --- a/content/rancher/v2.5/en/managing-applications/tutorial/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: "Tutorial: Example Custom Chart Creation" -weight: 8 --- - -In this tutorial, you'll learn how to create a Helm chart and deploy it to a repository. The repository can then be used as a source for a custom catalog in Rancher. - -You can fill your custom catalogs with either Helm Charts or Rancher Charts, although we recommend Rancher Charts due to their enhanced user experience. 
- -> For a complete walkthrough of developing charts, see the upstream Helm chart [developer reference](https://helm.sh/docs/chart_template_guide/). - -1. Within the GitHub repo that you're using as your custom catalog, create a directory structure that mirrors the structure listed in [Chart Directory Structure](../creating-apps/#chart-directory-structure). - - Rancher requires this directory structure, although `app-readme.md` and `questions.yml` are optional. - - >**Tip:** - > - >- To begin customizing a chart, copy one from either the [Rancher Library](https://github.com/rancher/charts) or the [Helm Stable](https://github.com/kubernetes/charts/tree/master/stable). - >- For a complete walk through of developing charts, see the upstream Helm chart [developer reference](https://docs.helm.sh/developing_charts/). - -2. **Recommended:** Create an `app-readme.md` file. - - Use this file to create custom text for your chart's header in the Rancher UI. You can use this text to notify users that the chart is customized for your environment or provide special instruction on how to use it. -
-
- **Example**: - - ``` - $ cat ./app-readme.md - - # Wordpress ROCKS! - ``` - -3. **Recommended:** Create a `questions.yml` file. - - This file creates a form for users to specify deployment parameters when they deploy the custom chart. Without this file, users **must** specify the parameters manually using key value pairs, which isn't user-friendly. -
-
- The example below creates a form that prompts users for persistent volume size and a storage class. -
-
- For a list of variables you can use when creating a `questions.yml` file, see [Question Variable Reference]({{}}/rancher/v2.x/en/catalog/creating-apps/#question-variable-reference). - - ```yaml - categories: - - Blog - - CMS - questions: - - variable: persistence.enabled - default: "false" - description: "Enable persistent volume for WordPress" - type: boolean - required: true - label: WordPress Persistent Volume Enabled - show_subquestion_if: true - group: "WordPress Settings" - subquestions: - - variable: persistence.size - default: "10Gi" - description: "WordPress Persistent Volume Size" - type: string - label: WordPress Volume Size - - variable: persistence.storageClass - default: "" - description: "If undefined or null, uses the default StorageClass. Default to null" - type: storageclass - label: Default StorageClass for WordPress - ``` - -4. Check the customized chart into your GitHub repo. - -**Result:** Your custom chart is added to the repo. Your Rancher Server will replicate the chart within a few minutes. diff --git a/content/rancher/v2.5/en/monitoring/legacy/_index.md b/content/rancher/v2.5/en/monitoring/legacy/_index.md deleted file mode 100644 index 9240f82fa7f..00000000000 --- a/content/rancher/v2.5/en/monitoring/legacy/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Legacy UI Docs -weight: 2 ---- - -> This section is under construction. \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring/legacy/alerts/_index.md b/content/rancher/v2.5/en/monitoring/legacy/alerts/_index.md deleted file mode 100644 index 1b0b9685295..00000000000 --- a/content/rancher/v2.5/en/monitoring/legacy/alerts/_index.md +++ /dev/null @@ -1,243 +0,0 @@ ---- -title: Alerts -weight: 2 ---- - -To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. 
When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. - -Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) and [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) of events they need to address. - -Before you can receive alerts, you must configure one or more notifier in Rancher. - -When you create a cluster, some alert rules are predefined. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) for them. - -For details about what triggers the predefined alerts, refer to the [documentation on default alerts.]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts) - -This section covers the following topics: - -- [Alert event examples](#alert-event-examples) - - [Prometheus queries](#prometheus-queries) -- [Urgency levels](#urgency-levels) -- [Scope of alerts](#scope-of-alerts) -- [Adding cluster alerts](#adding-cluster-alerts) -- [Managing cluster alerts](#managing-cluster-alerts) - -# Alert Event Examples - -Some examples of alert events are: - -- A Kubernetes [master component]({{}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) entering an unhealthy state. -- A node or [workload]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/) error occurring. -- A scheduled deployment taking place as planned. -- A node's hardware resources becoming overstressed. - -### Prometheus Queries - -> **Prerequisite:** Monitoring must be [enabled]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring) before you can trigger alerts with custom Prometheus queries or expressions. 
- -When you edit an alert rule, you will have the opportunity to configure the alert to be triggered based on a Prometheus expression. For examples of expressions, refer to [this page.]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression) - -# Urgency Levels - -You can set an urgency level for each alert. This urgency appears in the notification you receive, helping you to prioritize your response actions. For example, if you have an alert configured to inform you of a routine deployment, no action is required. These alerts can be assigned a low priority level. However, if a deployment fails, it can critically impact your organization, and you need to react quickly. Assign these alerts a high priority level. - -# Scope of Alerts - -The scope for alerts can be set at either the cluster level or [project level]({{}}/rancher/v2.x/en/project-admin/tools/alerts/). - -At the cluster level, Rancher monitors components in your Kubernetes cluster, and sends you alerts related to: - -- The state of your nodes. -- The system services that manage your Kubernetes cluster. -- The resource events from specific system services. -- The Prometheus expression cross the thresholds - -# Adding Cluster Alerts - -As a [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send you alerts for cluster events. - ->**Prerequisite:** Before you can receive cluster alerts, you must [add a notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers). - -1. From the **Global** view, navigate to the cluster that you want to configure cluster alerts for. Select **Tools > Alerts**. Then click **Add Alert Group**. - -1. Enter a **Name** for the alert that describes its purpose, you could group alert rules for the different purpose. - -1. Based on the type of alert you want to create, complete one of the instruction subsets below. 
-{{% accordion id="system-service" label="System Service Alerts" %}} -This alert type monitors for events that affect one of the Kubernetes master components, regardless of the node it occurs on. - -1. Select the **System Services** option, and then select an option from the drop-down. - - - [controller-manager](https://kubernetes.io/docs/concepts/overview/components/#kube-controller-manager) - - [etcd](https://kubernetes.io/docs/concepts/overview/components/#etcd) - - [scheduler](https://kubernetes.io/docs/concepts/overview/components/#kube-scheduler) - -1. Select the urgency level of the alert. The options are: - - - **Critical**: Most urgent - - **Warning**: Normal urgency - - **Info**: Least urgent -
-
- Select the urgency level based on the importance of the service and how many nodes fill the role within your cluster. For example, if you're making an alert for the `etcd` service, select **Critical**. If you're making an alert for redundant schedulers, **Warning** is more appropriate. - -1. Configure advanced options. By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - - - **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. - - **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. - - **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. - -{{% /accordion %}} -{{% accordion id="resource-event" label="Resource Event Alerts" %}} -This alert type monitors for specific events that are thrown from a resource type. - -1. Choose the type of resource event that triggers an alert. The options are: - - - **Normal**: triggers an alert when any standard resource event occurs. - - **Warning**: triggers an alert when unexpected resource events occur. - -1. Select a resource type from the **Choose a Resource** drop-down that you want to trigger an alert. - - - [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) - - [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) - - [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) - - [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) - - [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) - -1. Select the urgency level of the alert. - - - **Critical**: Most urgent - - **Warning**: Normal urgency - - **Info**: Least urgent -
-
 - Select the urgency level of the alert by considering factors such as how often the event occurs or its importance. For example: - - - If you set a normal alert for pods, you're likely to receive alerts often, and individual pods usually self-heal, so select an urgency of **Info**. - - If you set a warning alert for StatefulSets, it's very likely to impact operations, so select an urgency of **Critical**. - -1. Configure advanced options. By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - - - **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. - - **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. - - **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. - -{{% /accordion %}} -{{% accordion id="node" label="Node Alerts" %}} -This alert type monitors for events that occur on a specific node. - -1. Select the **Node** option, and then make a selection from the **Choose a Node** drop-down. - -1. Choose an event to trigger the alert. - - - **Not Ready**: Sends you an alert when the node is unresponsive. - - **CPU usage over**: Sends you an alert when the node rises above an entered percentage of its processing allocation. - - **Mem usage over**: Sends you an alert when the node rises above an entered percentage of its memory allocation. - -1. Select the urgency level of the alert. - - - **Critical**: Most urgent - - **Warning**: Normal urgency - - **Info**: Least urgent -
-
 - Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's CPU rises above 60% warrants an urgency of **Info**, but a node that is **Not Ready** warrants an urgency of **Critical**. - -1. Configure advanced options. By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - - - **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. - - **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. - - **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. - -{{% /accordion %}} -{{% accordion id="node-selector" label="Node Selector Alerts" %}} -This alert type monitors for events that occur on any node marked with a label. For more information, see the Kubernetes documentation for [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). - -1. Select the **Node Selector** option, and then click **Add Selector** to enter a key value pair for a label. This label should be applied to one or more of your nodes. Add as many selectors as you'd like. - -1. Choose an event to trigger the alert. - - - **Not Ready**: Sends you an alert when selected nodes are unresponsive. - - **CPU usage over**: Sends you an alert when selected nodes rise above an entered percentage of processing allocation. - - **Mem usage over**: Sends you an alert when selected nodes rise above an entered percentage of memory allocation. - -1. Select the urgency level of the alert. - - - **Critical**: Most urgent - - **Warning**: Normal urgency - - **Info**: Least urgent -
-
- Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's CPU raises above 60% deems an urgency of **Info**, but a node that is **Not Ready** deems an urgency of **Critical**. - -1. Configure advanced options. By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - - - **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. - - **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. - - **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. - -{{% /accordion %}} -{{% accordion id="cluster-expression" label="Metric Expression Alerts" %}} -This alert type monitors for the overload from Prometheus expression querying, it would be available after you enable monitoring. - -1. Input or select an **Expression**, the drop down shows the original metrics from Prometheus, including: - - - [**Node**](https://github.com/prometheus/node_exporter) - - [**Container**](https://github.com/google/cadvisor) - - [**ETCD**](https://etcd.io/docs/v3.4.0/op-guide/monitoring/) - - [**Kubernetes Components**](https://github.com/kubernetes/metrics) - - [**Kubernetes Resources**](https://github.com/kubernetes/kube-state-metrics) - - [**Fluentd**](https://docs.fluentd.org/v1.0/articles/monitoring-prometheus) (supported by [Logging]({{}}/rancher/v2.x//en/cluster-admin/tools/logging)) - - [**Cluster Level Grafana**](http://docs.grafana.org/administration/metrics/) - - **Cluster Level Prometheus** - -1. Choose a **Comparison**. - - - **Equal**: Trigger alert when expression value equal to the threshold. 
 - - **Not Equal**: Trigger alert when expression value not equal to the threshold. - - **Greater Than**: Trigger alert when expression value greater than the threshold. - - **Less Than**: Trigger alert when expression value less than the threshold. - - **Greater or Equal**: Trigger alert when expression value greater than or equal to the threshold. - - **Less or Equal**: Trigger alert when expression value less or equal to the threshold. - -1. Input a **Threshold** to trigger an alert when the value of the expression crosses the threshold. - -1. Choose a **Comparison**. - -1. Select a duration to trigger an alert when the expression value crosses the threshold for longer than the configured duration. - -1. Select the urgency level of the alert. - - - **Critical**: Most urgent - - **Warning**: Normal urgency - - **Info**: Least urgent -
-
 - Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's load expression ```sum(node_load5) / count(node_cpu_seconds_total{mode="system"})``` rises above 0.6 warrants an urgency of **Info**, but 1 warrants an urgency of **Critical**. - -1. Configure advanced options. By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - - - **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. - - **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. - - **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. - -{{% /accordion %}} - -1. Continue adding more **Alert Rule** to the group. - -1. Finally, choose the [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) to send the alerts to. - - - You can set up multiple notifiers. - - You can change notifier recipients on the fly. - -**Result:** Your alert is configured. A notification is sent when the alert is triggered. - -# Managing Cluster Alerts - -After you set up cluster alerts, you can manage each alert object. To manage alerts, browse to the cluster containing the alerts, and then select **Tools > Alerts** that you want to manage. 
You can: - -- Deactivate/Reactive alerts -- Edit alert settings -- Delete unnecessary alerts -- Mute firing alerts -- Unmute muted alerts diff --git a/content/rancher/v2.5/en/monitoring/legacy/alerts/default-alerts/_index.md b/content/rancher/v2.5/en/monitoring/legacy/alerts/default-alerts/_index.md deleted file mode 100644 index ea7f91ff0e0..00000000000 --- a/content/rancher/v2.5/en/monitoring/legacy/alerts/default-alerts/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Default Alerts for Cluster Monitoring -weight: 1 ---- - -When you create a cluster, some alert rules are predefined. These alerts notify you about signs that the cluster could be unhealthy. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) for them. - -Several of the alerts use Prometheus expressions as the metric that triggers the alert. For more information on how expressions work, you can refer to the Rancher [documentation about Prometheus expressions]({{}} -/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/) or the Prometheus [documentation about querying metrics](https://prometheus.io/docs/prometheus/latest/querying/basics/). - -# Alerts for etcd -Etcd is the key-value store that contains the state of the Kubernetes cluster. Rancher provides default alerts if the built-in monitoring detects a potential problem with etcd. You don't have to enable monitoring to receive these alerts. - -A leader is the node that handles all client requests that need cluster consensus. For more information, you can refer to this [explanation of how etcd works.](https://rancher.com/blog/2019/2019-01-29-what-is-etcd/#how-does-etcd-work) - -The leader of the cluster can change in response to certain events. It is normal for the leader to change, but too many changes can indicate a problem with the network or a high CPU load. 
With longer latencies, the default etcd configuration may cause frequent heartbeat timeouts, which trigger a new leader election. - -| Alert | Explanation | -|-------|-------------| -| A high number of leader changes within the etcd cluster are happening | A warning alert is triggered when the leader changes more than three times in one hour. | -| Database usage close to the quota 500M | A warning alert is triggered when the size of etcd exceeds 500M.| -| Etcd is unavailable | A critical alert is triggered when etcd becomes unavailable. | -| Etcd member has no leader | A critical alert is triggered when the etcd cluster does not have a leader for at least three minutes. | - - -# Alerts for Kubernetes Components -Rancher provides alerts when core Kubernetes system components become unhealthy. - -Controllers update Kubernetes resources based on changes in etcd. The [controller manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) monitors the cluster desired state through the Kubernetes API server and makes the necessary changes to the current state to reach the desired state. - -The [scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) service is a core component of Kubernetes. It is responsible for scheduling cluster workloads to nodes, based on various configurations, metrics, resource requirements and workload-specific requirements. - -| Alert | Explanation | -|-------|-------------| -| Controller Manager is unavailable | A critical warning is triggered when the cluster’s controller-manager becomes unavailable. | -| Scheduler is unavailable | A critical warning is triggered when the cluster’s scheduler becomes unavailable. | - - -# Alerts for Events -Kubernetes events are objects that provide insight into what is happening inside a cluster, such as what decisions were made by the scheduler or why some pods were evicted from the node. 
In the Rancher UI, from the project view, you can see events for each workload. - -| Alert | Explanation | -|-------|-------------| -| Get warning deployment event | A warning alert is triggered when a warning event happens on a deployment. | - - -# Alerts for Nodes -Alerts can be triggered based on node metrics. Each computing resource in a Kubernetes cluster is called a node. [Nodes]({{}}/rancher/v2.x/en/cluster-admin/#kubernetes-cluster-node-components) can be either bare-metal servers or virtual machines. - -| Alert | Explanation | -|-------|-------------| -| High CPU load | A warning alert is triggered if the node uses more than 100 percent of the node’s available CPU seconds for at least three minutes. | -| High node memory utilization | A warning alert is triggered if the node uses more than 80 percent of its available memory for at least three minutes. | -| Node disk is running full within 24 hours | A critical alert is triggered if the disk space on the node is expected to run out in the next 24 hours based on the disk growth over the last 6 hours. | - -# Project-level Alerts -When you enable monitoring for the project, some project-level alerts are provided. For details, refer to the [section on project-level alerts.]({{}}/rancher/v2.x/en/project-admin/tools/alerts/#default-project-level-alerts) \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring/legacy/alerts/expression/_index.md b/content/rancher/v2.5/en/monitoring/legacy/alerts/expression/_index.md deleted file mode 100644 index 9f5170c9779..00000000000 --- a/content/rancher/v2.5/en/monitoring/legacy/alerts/expression/_index.md +++ /dev/null @@ -1,430 +0,0 @@ ---- -title: Prometheus Expressions -weight: 4 ---- - -The PromQL expressions in this doc can be used to configure [alerts.]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) - -> Before expression can be used in alerts, monitoring must be enabled. 
For more information, refer to the documentation on enabling monitoring [at the cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring) or [at the project level.]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring) - -For more information about querying Prometheus, refer to the official [Prometheus documentation.](https://prometheus.io/docs/prometheus/latest/querying/basics/) - - - -- [Cluster Metrics](#cluster-metrics) - - [Cluster CPU Utilization](#cluster-cpu-utilization) - - [Cluster Load Average](#cluster-load-average) - - [Cluster Memory Utilization](#cluster-memory-utilization) - - [Cluster Disk Utilization](#cluster-disk-utilization) - - [Cluster Disk I/O](#cluster-disk-i-o) - - [Cluster Network Packets](#cluster-network-packets) - - [Cluster Network I/O](#cluster-network-i-o) -- [Node Metrics](#node-metrics) - - [Node CPU Utilization](#node-cpu-utilization) - - [Node Load Average](#node-load-average) - - [Node Memory Utilization](#node-memory-utilization) - - [Node Disk Utilization](#node-disk-utilization) - - [Node Disk I/O](#node-disk-i-o) - - [Node Network Packets](#node-network-packets) - - [Node Network I/O](#node-network-i-o) -- [Etcd Metrics](#etcd-metrics) - - [Etcd Has a Leader](#etcd-has-a-leader) - - [Number of Times the Leader Changes](#number-of-times-the-leader-changes) - - [Number of Failed Proposals](#number-of-failed-proposals) - - [GRPC Client Traffic](#grpc-client-traffic) - - [Peer Traffic](#peer-traffic) - - [DB Size](#db-size) - - [Active Streams](#active-streams) - - [Raft Proposals](#raft-proposals) - - [RPC Rate](#rpc-rate) - - [Disk Operations](#disk-operations) - - [Disk Sync Duration](#disk-sync-duration) -- [Kubernetes Components Metrics](#kubernetes-components-metrics) - - [API Server Request Latency](#api-server-request-latency) - - [API Server Request Rate](#api-server-request-rate) - - [Scheduling Failed Pods](#scheduling-failed-pods) - - [Controller 
Manager Queue Depth](#controller-manager-queue-depth) - - [Scheduler E2E Scheduling Latency](#scheduler-e2e-scheduling-latency) - - [Scheduler Preemption Attempts](#scheduler-preemption-attempts) - - [Ingress Controller Connections](#ingress-controller-connections) - - [Ingress Controller Request Process Time](#ingress-controller-request-process-time) -- [Rancher Logging Metrics](#rancher-logging-metrics) - - [Fluentd Buffer Queue Rate](#fluentd-buffer-queue-rate) - - [Fluentd Input Rate](#fluentd-input-rate) - - [Fluentd Output Errors Rate](#fluentd-output-errors-rate) - - [Fluentd Output Rate](#fluentd-output-rate) -- [Workload Metrics](#workload-metrics) - - [Workload CPU Utilization](#workload-cpu-utilization) - - [Workload Memory Utilization](#workload-memory-utilization) - - [Workload Network Packets](#workload-network-packets) - - [Workload Network I/O](#workload-network-i-o) - - [Workload Disk I/O](#workload-disk-i-o) -- [Pod Metrics](#pod-metrics) - - [Pod CPU Utilization](#pod-cpu-utilization) - - [Pod Memory Utilization](#pod-memory-utilization) - - [Pod Network Packets](#pod-network-packets) - - [Pod Network I/O](#pod-network-i-o) - - [Pod Disk I/O](#pod-disk-i-o) -- [Container Metrics](#container-metrics) - - [Container CPU Utilization](#container-cpu-utilization) - - [Container Memory Utilization](#container-memory-utilization) - - [Container Disk I/O](#container-disk-i-o) - - - -# Cluster Metrics - -### Cluster CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by (instance))` | -| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])))` | - -### Cluster Load Average - -| Catalog | Expression | -| --- | --- | -| Detail |
load1`sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
load5`sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
load15`sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
| -| Summary |
load1`sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"})`
load5`sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"})`
load15`sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"})`
| - -### Cluster Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)` | -| Summary | `1 - sum(node_memory_MemAvailable_bytes) / sum(node_memory_MemTotal_bytes)` | - -### Cluster Disk Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance) - sum(node_filesystem_free_bytes{device!="rootfs"}) by (instance)) / sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance)` | -| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs"}) - sum(node_filesystem_free_bytes{device!="rootfs"})) / sum(node_filesystem_size_bytes{device!="rootfs"})` | - -### Cluster Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(node_disk_read_bytes_total[5m])) by (instance)`
written`sum(rate(node_disk_written_bytes_total[5m])) by (instance)`
| -| Summary |
read`sum(rate(node_disk_read_bytes_total[5m]))`
written`sum(rate(node_disk_written_bytes_total[5m]))`
| - -### Cluster Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
| -| Summary |
receive-dropped`sum(rate(node_network_receive_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
receive-errs`sum(rate(node_network_receive_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
receive-packets`sum(rate(node_network_receive_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
transmit-dropped`sum(rate(node_network_transmit_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
transmit-errs`sum(rate(node_network_transmit_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
transmit-packets`sum(rate(node_network_transmit_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
| - -### Cluster Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`
transmit`sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`
| -| Summary |
receive`sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
transmit`sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
| - -# Node Metrics - -### Node CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `avg(irate(node_cpu_seconds_total{mode!="idle", instance=~"$instance"}[5m])) by (mode)` | -| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle", instance=~"$instance"}[5m])))` | - -### Node Load Average - -| Catalog | Expression | -| --- | --- | -| Detail |
load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
| -| Summary |
load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
| - -### Node Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"})` | -| Summary | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"}) ` | - -### Node Disk Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"}) by (device)) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device)` | -| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"})) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"})` | - -### Node Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
| -| Summary |
read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
| - -### Node Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-dropped`sum(rate(node_network_receive_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
receive-errs`sum(rate(node_network_receive_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
receive-packets`sum(rate(node_network_receive_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-dropped`sum(rate(node_network_transmit_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-errs`sum(rate(node_network_transmit_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-packets`sum(rate(node_network_transmit_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
| -| Summary |
receive-dropped`sum(rate(node_network_receive_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
receive-errs`sum(rate(node_network_receive_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
receive-packets`sum(rate(node_network_receive_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
transmit-dropped`sum(rate(node_network_transmit_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
transmit-errs`sum(rate(node_network_transmit_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
transmit-packets`sum(rate(node_network_transmit_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
| - -### Node Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit`sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
| -| Summary |
receive`sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
transmit`sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
| - -# Etcd Metrics - -### Etcd Has a Leader - -`max(etcd_server_has_leader)` - -### Number of Times the Leader Changes - -`max(etcd_server_leader_changes_seen_total)` - -### Number of Failed Proposals - -`sum(etcd_server_proposals_failed_total)` - -### GRPC Client Traffic - -| Catalog | Expression | -| --- | --- | -| Detail |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m])) by (instance)`
| -| Summary |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m]))`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m]))`
| - -### Peer Traffic - -| Catalog | Expression | -| --- | --- | -| Detail |
in`sum(rate(etcd_network_peer_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m])) by (instance)`
| -| Summary |
in`sum(rate(etcd_network_peer_received_bytes_total[5m]))`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m]))`
| - -### DB Size - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(etcd_debugging_mvcc_db_total_size_in_bytes) by (instance)` | -| Summary | `sum(etcd_debugging_mvcc_db_total_size_in_bytes)` | - -### Active Streams - -| Catalog | Expression | -| --- | --- | -| Detail |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance)`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance)`
| -| Summary |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})`
| - -### Raft Proposals - -| Catalog | Expression | -| --- | --- | -| Detail |
applied`sum(increase(etcd_server_proposals_applied_total[5m])) by (instance)`
committed`sum(increase(etcd_server_proposals_committed_total[5m])) by (instance)`
pending`sum(increase(etcd_server_proposals_pending[5m])) by (instance)`
failed`sum(increase(etcd_server_proposals_failed_total[5m])) by (instance)`
| -| Summary |
applied`sum(increase(etcd_server_proposals_applied_total[5m]))`
committed`sum(increase(etcd_server_proposals_committed_total[5m]))`
pending`sum(increase(etcd_server_proposals_pending[5m]))`
failed`sum(increase(etcd_server_proposals_failed_total[5m]))`
| - -### RPC Rate - -| Catalog | Expression | -| --- | --- | -| Detail |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m])) by (instance)`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m])) by (instance)`
| -| Summary |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m]))`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m]))`
| - -### Disk Operations - -| Catalog | Expression | -| --- | --- | -| Detail |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m])) by (instance)`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m])) by (instance)`
| -| Summary |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m]))`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m]))`
| - -### Disk Sync Duration - -| Catalog | Expression | -| --- | --- | -| Detail |
wal`histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le))`
db`histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le))`
| -| Summary |
wal`sum(histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le)))`
db`sum(histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le)))`
| - -# Kubernetes Components Metrics - -### API Server Request Latency - -| Catalog | Expression | -| --- | --- | -| Detail | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance, verb) /1e+06` | -| Summary | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance) /1e+06` | - -### API Server Request Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(apiserver_request_count[5m])) by (instance, code)` | -| Summary | `sum(rate(apiserver_request_count[5m])) by (instance)` | - -### Scheduling Failed Pods - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(kube_pod_status_scheduled{condition="false"})` | -| Summary | `sum(kube_pod_status_scheduled{condition="false"})` | - -### Controller Manager Queue Depth - -| Catalog | Expression | -| --- | --- | -| Detail |
volumes`sum(volumes_depth) by (instance)`
deployment`sum(deployment_depth) by (instance)`
replicaset`sum(replicaset_depth) by (instance)`
service`sum(service_depth) by (instance)`
serviceaccount`sum(serviceaccount_depth) by (instance)`
endpoint`sum(endpoint_depth) by (instance)`
daemonset`sum(daemonset_depth) by (instance)`
statefulset`sum(statefulset_depth) by (instance)`
replicationmanager`sum(replicationmanager_depth) by (instance)`
| -| Summary |
volumes`sum(volumes_depth)`
deployment`sum(deployment_depth)`
replicaset`sum(replicaset_depth)`
service`sum(service_depth)`
serviceaccount`sum(serviceaccount_depth)`
endpoint`sum(endpoint_depth)`
daemonset`sum(daemonset_depth)`
statefulset`sum(statefulset_depth)`
replicationmanager`sum(replicationmanager_depth)`
| - -### Scheduler E2E Scheduling Latency - -| Catalog | Expression | -| --- | --- | -| Detail | `histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06` | -| Summary | `sum(histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06)` | - -### Scheduler Preemption Attempts - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(scheduler_total_preemption_attempts[5m])) by (instance)` | -| Summary | `sum(rate(scheduler_total_preemption_attempts[5m]))` | - -### Ingress Controller Connections - -| Catalog | Expression | -| --- | --- | -| Detail |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"}) by (instance)`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"}) by (instance)`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"}) by (instance)`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m]))) by (instance)`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m]))) by (instance)`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m]))) by (instance)`
| -| Summary |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"})`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"})`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"})`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m])))`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m])))`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m])))`
| - -### Ingress Controller Request Process Time - -| Catalog | Expression | -| --- | --- | -| Detail | `topk(10, histogram_quantile(0.95,sum by (le, host, path)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | -| Summary | `topk(10, histogram_quantile(0.95,sum by (le, host)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | - -# Rancher Logging Metrics - - -### Fluentd Buffer Queue Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_buffer_queue_length[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_output_status_buffer_queue_length[5m]))` | - -### Fluentd Input Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_input_status_num_records_total[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_input_status_num_records_total[5m]))` | - -### Fluentd Output Errors Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_num_errors[5m])) by (type)` | -| Summary | `sum(rate(fluentd_output_status_num_errors[5m]))` | - -### Fluentd Output Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_num_records_total[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_output_status_num_records_total[5m]))` | - -# Workload Metrics - -### Workload CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""}) by (pod_name)` | -| Summary | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""})` | - -### Workload Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -# Pod Metrics - -### Pod CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
| -| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
| - -### Pod Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""}) by (container_name)` | -| Summary | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""})` | - -### Pod Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| -| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -### Pod Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| -| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -### Pod Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
| -| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -# Container Metrics - -### Container CPU Utilization - -| Catalog | Expression | -| --- | --- | -| cfs throttled seconds | `sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| usage seconds | `sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| system seconds | `sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| user seconds | `sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | - -### Container Memory Utilization - -`sum(container_memory_working_set_bytes{namespace="$namespace",pod_name="$podName",container_name="$containerName"})` - -### Container Disk I/O - -| Catalog | Expression | -| --- | --- | -| read | `sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| write | `sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | diff --git a/content/rancher/v2.5/en/monitoring/legacy/monitoring/_index.md b/content/rancher/v2.5/en/monitoring/legacy/monitoring/_index.md deleted file mode 100644 index 1cb62c544bf..00000000000 --- a/content/rancher/v2.5/en/monitoring/legacy/monitoring/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Monitoring -weight: 1 ---- \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring/legacy/monitoring/cluster-metrics/_index.md b/content/rancher/v2.5/en/monitoring/legacy/monitoring/cluster-metrics/_index.md deleted file mode 100644 index 0d7175aaeb8..00000000000 --- a/content/rancher/v2.5/en/monitoring/legacy/monitoring/cluster-metrics/_index.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Cluster Metrics -weight: 3 ---- -Cluster metrics display the 
hardware utilization for all nodes in your cluster, regardless of its role. They give you a global monitoring insight into the cluster. - -Some of the biggest metrics to look out for: - -- **CPU Utilization** - - High load either indicates that your cluster is running efficiently or that you're running out of CPU resources. - -- **Disk Utilization** - - Be on the lookout for increased read and write rates on nodes nearing their disk capacity. This advice is especially true for etcd nodes, as running out of storage on an etcd node leads to cluster failure. - -- **Memory Utilization** - - Deltas in memory utilization usually indicate a memory leak. - -- **Load Average** - - Generally, you want your load average to match your number of logical CPUs for the cluster. For example, if your cluster has 8 logical CPUs, the ideal load average would be 8 as well. If you load average is well under the number of logical CPUs for the cluster, you may want to reduce cluster resources. On the other hand, if your average is over 8, your cluster may need more resources. - -## Finding Node Metrics - -1. From the **Global** view, navigate to the cluster that you want to view metrics. - -1. Select **Nodes** in the navigation bar. - -1. Select a specific node and click on its name. - -1. Click on **Node Metrics**. - -[_Get expressions for Cluster Metrics_]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#cluster-metrics) - -### Etcd Metrics - ->**Note:** Only supported for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -Etcd metrics display the operations of the etcd database on each of your cluster nodes. After establishing a baseline of normal etcd operational metrics, observe them for abnormal deltas between metric refreshes, which indicate potential issues with etcd. Always address etcd issues immediately! 
- -You should also pay attention to the text at the top of the etcd metrics, which displays leader election statistics. This text indicates if etcd currently has a leader, which is the etcd instance that coordinates the other etcd instances in your cluster. A large increase in leader changes implies etcd is unstable. If you notice a change in leader election statistics, you should investigate them for issues. - -Some of the biggest metrics to look out for: - -- **Etcd has a leader** - - etcd is usually deployed on multiple nodes and elects a leader to coordinate its operations. If etcd does not have a leader, its operations are not being coordinated. - -- **Number of leader changes** - - If this statistic suddenly grows, it usually indicates network communication issues that constantly force the cluster to elect a new leader. - -[_Get expressions for Etcd Metrics_]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#etcd-metrics) - -### Kubernetes Components Metrics - -Kubernetes components metrics display data about the cluster's individual Kubernetes components. Primarily, it displays information about connections and latency for each component: the API server, controller manager, scheduler, and ingress controller. - ->**Note:** The metrics for the controller manager, scheduler and ingress controller are only supported for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -When analyzing Kubernetes component metrics, don't be concerned about any single standalone metric in the charts and graphs that display. Rather, you should establish a baseline for metrics considered normal following a period of observation, e.g. the range of values that your components usually operate within and are considered normal. After you establish this baseline, be on the lookout for large deltas in the charts and graphs, as these big changes usually indicate a problem that you need to investigate. 
- -Some of the more important component metrics to monitor are: - -- **API Server Request Latency** - - Increasing API response times indicate there's a generalized problem that requires investigation. - -- **API Server Request Rate** - - Rising API request rates usually coincide with increased API response times. Increased request rates also indicate a generalized problem requiring investigation. - -- **Scheduler Preemption Attempts** - - If you see a spike in scheduler preemptions, it's an indication that you're running out of hardware resources, as Kubernetes is recognizing it doesn't have enough resources to run all your pods and is prioritizing the more important ones. - -- **Scheduling Failed Pods** - - Failed pods can have a variety of causes, such as unbound persistent volume claims, exhausted hardware resources, non-responsive nodes, etc. - -- **Ingress Controller Request Process Time** - - How fast ingress is routing connections to your cluster services. - -[_Get expressions for Kubernetes Component Metrics_]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#kubernetes-components-metrics) - -## Rancher Logging Metrics - -Although the Dashboard for a cluster primarily displays data sourced from Prometheus, it also displays information for cluster logging, provided that you have [configured Rancher to use a logging service]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/). - -[_Get expressions for Rancher Logging Metrics_]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#rancher-logging-metrics) - -## Finding Workload Metrics - -Workload metrics display the hardware utilization for a Kubernetes workload. You can also view metrics for [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [stateful sets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) and so on. - -1. From the **Global** view, navigate to the project that you want to view workload metrics. - -1. 
From the main navigation bar, choose **Resources > Workloads.** In versions prior to v2.3.0, choose **Workloads** on the main navigation bar. - -1. Select a specific workload and click on its name. - -1. In the **Pods** section, select a specific pod and click on its name. - - - **View the Pod Metrics:** Click on **Pod Metrics**. - - **View the Container Metrics:** In the **Containers** section, select a specific container and click on its name. Click on **Container Metrics**. - -[_Get expressions for Workload Metrics_]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/#workload-metrics) diff --git a/content/rancher/v2.5/en/monitoring/legacy/monitoring/custom-metrics/_index.md b/content/rancher/v2.5/en/monitoring/legacy/monitoring/custom-metrics/_index.md deleted file mode 100644 index a78b62ee206..00000000000 --- a/content/rancher/v2.5/en/monitoring/legacy/monitoring/custom-metrics/_index.md +++ /dev/null @@ -1,489 +0,0 @@ ---- -title: Prometheus Custom Metrics Adapter -weight: 5 ---- - -After you've enabled [cluster level monitoring]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring), You can view the metrics data from Rancher. You can also deploy the Prometheus custom metrics adapter then you can use the HPA with metrics stored in cluster monitoring. - -## Deploy Prometheus Custom Metrics Adapter - -We are going to use the [Prometheus custom metrics adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter/releases/tag/v0.5.0), version v0.5.0. This is a great example for the [custom metrics server](https://github.com/kubernetes-incubator/custom-metrics-apiserver). And you must be the *cluster owner* to execute following steps. - -- Get the service account of the cluster monitoring is using. It should be configured in the workload ID: `statefulset:cattle-prometheus:prometheus-cluster-monitoring`. And if you didn't customize anything, the service account name should be `cluster-monitoring`. 
- -- Grant permission to that service account. You will need two kinds of permission. -One role is `extension-apiserver-authentication-reader` in `kube-system`, so you will need to create a `Rolebinding` to in `kube-system`. This permission is to get api aggregation configuration from config map in `kube-system`. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: custom-metrics-auth-reader - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: -- kind: ServiceAccount - name: cluster-monitoring - namespace: cattle-prometheus -``` - -The other one is cluster role `system:auth-delegator`, so you will need to create a `ClusterRoleBinding`. This permission is to have subject access review permission. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: custom-metrics:system:auth-delegator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: cluster-monitoring - namespace: cattle-prometheus -``` - -- Create configuration for custom metrics adapter. Following is an example configuration. There will be a configuration details in next session. 
- -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: adapter-config - namespace: cattle-prometheus -data: - config.yaml: | - rules: - - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - seriesFilters: [] - resources: - overrides: - namespace: - resource: namespace - pod_name: - resource: pod - name: - matches: ^container_(.*)_seconds_total$ - as: "" - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>) - - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - seriesFilters: - - isNot: ^container_.*_seconds_total$ - resources: - overrides: - namespace: - resource: namespace - pod_name: - resource: pod - name: - matches: ^container_(.*)_total$ - as: "" - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>) - - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - seriesFilters: - - isNot: ^container_.*_total$ - resources: - overrides: - namespace: - resource: namespace - pod_name: - resource: pod - name: - matches: ^container_(.*)$ - as: "" - metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}) by (<<.GroupBy>>) - - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' - seriesFilters: - - isNot: .*_total$ - resources: - template: <<.Resource>> - name: - matches: "" - as: "" - metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) - - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' - seriesFilters: - - isNot: .*_seconds_total - resources: - template: <<.Resource>> - name: - matches: ^(.*)_total$ - as: "" - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) - - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' - seriesFilters: [] - resources: - template: <<.Resource>> - name: - matches: ^(.*)_seconds_total$ - as: "" - metricsQuery: 
sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) - resourceRules: - cpu: - containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) - nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[1m])) by (<<.GroupBy>>) - resources: - overrides: - instance: - resource: node - namespace: - resource: namespace - pod_name: - resource: pod - containerLabel: container_name - memory: - containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>) - nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>) - resources: - overrides: - instance: - resource: node - namespace: - resource: namespace - pod_name: - resource: pod - containerLabel: container_name - window: 1m -``` - -- Create HTTPS TLS certs for your api server. You can use following command to create a self-signed cert. - -```bash -openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out serving.crt -keyout serving.key -subj "/C=CN/CN=custom-metrics-apiserver.cattle-prometheus.svc.cluster.local" -# And you will find serving.crt and serving.key in your path. And then you are going to create a secret in cattle-prometheus namespace. -kubectl create secret generic -n cattle-prometheus cm-adapter-serving-certs --from-file=serving.key=./serving.key --from-file=serving.crt=./serving.crt -``` - -- Then you can create the prometheus custom metrics adapter. And you will need a service for this deployment too. Creating it via Import YAML or Rancher would do. Please create those resources in `cattle-prometheus` namespaces. - -Here is the prometheus custom metrics adapter deployment. 
-```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: custom-metrics-apiserver - name: custom-metrics-apiserver - namespace: cattle-prometheus -spec: - replicas: 1 - selector: - matchLabels: - app: custom-metrics-apiserver - template: - metadata: - labels: - app: custom-metrics-apiserver - name: custom-metrics-apiserver - spec: - serviceAccountName: cluster-monitoring - containers: - - name: custom-metrics-apiserver - image: directxman12/k8s-prometheus-adapter-amd64:v0.5.0 - args: - - --secure-port=6443 - - --tls-cert-file=/var/run/serving-cert/serving.crt - - --tls-private-key-file=/var/run/serving-cert/serving.key - - --logtostderr=true - - --prometheus-url=http://prometheus-operated/ - - --metrics-relist-interval=1m - - --v=10 - - --config=/etc/adapter/config.yaml - ports: - - containerPort: 6443 - volumeMounts: - - mountPath: /var/run/serving-cert - name: volume-serving-cert - readOnly: true - - mountPath: /etc/adapter/ - name: config - readOnly: true - - mountPath: /tmp - name: tmp-vol - volumes: - - name: volume-serving-cert - secret: - secretName: cm-adapter-serving-certs - - name: config - configMap: - name: adapter-config - - name: tmp-vol - emptyDir: {} - -``` - -Here is the service of the deployment. -```yaml -apiVersion: v1 -kind: Service -metadata: - name: custom-metrics-apiserver - namespace: cattle-prometheus -spec: - ports: - - port: 443 - targetPort: 6443 - selector: - app: custom-metrics-apiserver -``` - -- Create API service for your custom metric server. - -```yaml -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.custom.metrics.k8s.io -spec: - service: - name: custom-metrics-apiserver - namespace: cattle-prometheus - group: custom.metrics.k8s.io - version: v1beta1 - insecureSkipTLSVerify: true - groupPriorityMinimum: 100 - versionPriority: 100 - -``` - -- Then you can verify your custom metrics server by `kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1`. 
If you see data returned from the API, it means that the metrics server has been successfully set up. - -- You can create an HPA with custom metrics now. Here is an example of HPA. You will need to create an nginx deployment in your namespace first. - -```yaml -kind: HorizontalPodAutoscaler -apiVersion: autoscaling/v2beta1 -metadata: - name: nginx -spec: - scaleTargetRef: - # point the HPA at the nginx deployment you just created - apiVersion: apps/v1 - kind: Deployment - name: nginx - # autoscale between 1 and 10 replicas - minReplicas: 1 - maxReplicas: 10 - metrics: - # use a "Pods" metric, which takes the average of the - # given metric across all pods controlled by the autoscaling target - - type: Pods - pods: - metricName: memory_usage_bytes - targetAverageValue: 5000000 -``` - -And then, you should see your nginx deployment scaling up. HPA with custom metrics works. - -## Configuration of prometheus custom metrics adapter - -> Refer to https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config.md - -The adapter determines which metrics to expose, and how to expose them, -through a set of "discovery" rules. Each rule is executed independently -(so make sure that your rules are mutually exclusive), and specifies each -of the steps the adapter needs to take to expose a metric in the API. - -Each rule can be broken down into roughly four parts: - -- *Discovery*, which specifies how the adapter should find all Prometheus - metrics for this rule. - -- *Association*, which specifies how the adapter should determine which - Kubernetes resources a particular metric is associated with. - -- *Naming*, which specifies how the adapter should expose the metric in - the custom metrics API. - -- *Querying*, which specifies how a request for a particular metric on one - or more Kubernetes objects should be turned into a query to Prometheus.
- -A more comprehensive configuration file can be found in -[sample-config.yaml](sample-config.yaml), but a basic config with one rule -might look like: - -```yaml -rules: -# this rule matches cumulative cAdvisor metrics measured in seconds -- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - resources: - # skip specifying generic resource<->label mappings, and just - # attach only pod and namespace resources by mapping label names to group-resources - overrides: - namespace: {resource: "namespace"}, - pod_name: {resource: "pod"}, - # specify that the `container_` and `_seconds_total` suffixes should be removed. - # this also introduces an implicit filter on metric family names - name: - # we use the value of the capture group implicitly as the API name - # we could also explicitly write `as: "$1"` - matches: "^container_(.*)_seconds_total$" - # specify how to construct a query to fetch samples for a given series - # This is a Go template where the `.Series` and `.LabelMatchers` string values - # are available, and the delimiters are `<<` and `>>` to avoid conflicts with - # the prometheus query language - metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[2m])) by (<<.GroupBy>>)" -``` - -### Discovery - -Discovery governs the process of finding the metrics that you want to -expose in the custom metrics API. There are two fields that factor into -discovery: `seriesQuery` and `seriesFilters`. - -`seriesQuery` specifies Prometheus series query (as passed to the -`/api/v1/series` endpoint in Prometheus) to use to find some set of -Prometheus series. The adapter will strip the label values from this -series, and then use the resulting metric-name-label-names combinations -later on. - -In many cases, `seriesQuery` will be sufficient to narrow down the list of -Prometheus series. 
However, sometimes (especially if two rules might -otherwise overlap), it's useful to do additional filtering on metric -names. In this case, `seriesFilters` can be used. After the list of -series is returned from `seriesQuery`, each series has its metric name -filtered through any specified filters. - -Filters may be either: - -- `is: `, which matches any series whose name matches the specified - regex. - -- `isNot: `, which matches any series whose name does not match the - specified regex. - -For example: - -```yaml -# match all cAdvisor metrics that aren't measured in seconds -seriesQuery: '{__name__=~"^container_.*_total",container_name!="POD",namespace!="",pod_name!=""}' -seriesFilters: - isNot: "^container_.*_seconds_total" -``` - -### Association - -Association governs the process of figuring out which Kubernetes resources -a particular metric could be attached to. The `resources` field controls -this process. - -There are two ways to associate resources with a particular metric. In -both cases, the value of the label becomes the name of the particular -object. - -One way is to specify that any label name that matches some particular -pattern refers to some group-resource based on the label name. This can -be done using the `template` field. The pattern is specified as a Go -template, with the `Group` and `Resource` fields representing group and -resource. You don't necessarily have to use the `Group` field (in which -case the group is guessed by the system). For instance: - -```yaml -# any label `kube__` becomes . in Kubernetes -resources: - template: "kube_<<.Group>>_<<.Resource>>" -``` - -The other way is to specify that some particular label represents some -particular Kubernetes resource. This can be done using the `overrides` -field. Each override maps a Prometheus label to a Kubernetes -group-resource. 
For instance: - -```yaml -# the microservice label corresponds to the apps.deployment resource -resources: - overrides: - microservice: {group: "apps", resource: "deployment"} -``` - -These two can be combined, so you can specify both a template and some -individual overrides. - -The resources mentioned can be any resource available in your kubernetes -cluster, as long as you've got a corresponding label. - -### Naming - -Naming governs the process of converting a Prometheus metric name into -a metric in the custom metrics API, and vice versa. It's controlled by -the `name` field. - -Naming is controlled by specifying a pattern to extract an API name from -a Prometheus name, and potentially a transformation on that extracted -value. - -The pattern is specified in the `matches` field, and is just a regular -expression. If not specified, it defaults to `.*`. - -The transformation is specified by the `as` field. You can use any -capture groups defined in the `matches` field. If the `matches` field -doesn't contain capture groups, the `as` field defaults to `$0`. If it -contains a single capture group, the `as` field defaults to `$1`. -Otherwise, it's an error not to specify the `as` field. - -For example: - -```yaml -# turn any name `<name>_total` into `<name>_per_second` -# e.g. http_requests_total becomes http_requests_per_second -name: - matches: "^(.*)_total$" - as: "${1}_per_second" -``` - -### Querying - -Querying governs the process of actually fetching values for a particular -metric. It's controlled by the `metricsQuery` field. - -The `metricsQuery` field is a Go template that gets turned into -a Prometheus query, using input from a particular call to the custom -metrics API. A given call to the custom metrics API is distilled down to -a metric name, a group-resource, and one or more objects of that -group-resource.
These get turned into the following fields in the -template: - -- `Series`: the metric name -- `LabelMatchers`: a comma-separated list of label matchers matching the - given objects. Currently, this is the label for the particular - group-resource, plus the label for namespace, if the group-resource is - namespaced. -- `GroupBy`: a comma-separated list of labels to group by. Currently, - this contains the group-resource label used in `LabelMatchers`. - -For instance, suppose we had a series `http_requests_total` (exposed as -`http_requests_per_second` in the API) with labels `service`, `pod`, -`ingress`, `namespace`, and `verb`. The first four correspond to -Kubernetes resources. Then, if someone requested the metric -`pods/http_request_per_second` for the pods `pod1` and `pod2` in the -`somens` namespace, we'd have: - -- `Series: "http_requests_total"` -- `LabelMatchers: "pod=~\"pod1|pod2",namespace="somens"` -- `GroupBy`: `pod` - -Additionally, there are two advanced fields that are "raw" forms of other -fields: - -- `LabelValuesByName`: a map mapping the labels and values from the - `LabelMatchers` field. The values are pre-joined by `|` - (for used with the `=~` matcher in Prometheus). -- `GroupBySlice`: the slice form of `GroupBy`. - -In general, you'll probably want to use the `Series`, `LabelMatchers`, and -`GroupBy` fields. The other two are for advanced usage. - -The query is expected to return one value for each object requested. The -adapter will use the labels on the returned series to associate a given -series back to its corresponding object. 
- -For example: - -```yaml -# convert cumulative cAdvisor metrics into rates calculated over 2 minutes -metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[2m])) by (<<.GroupBy>>)" -``` diff --git a/content/rancher/v2.5/en/monitoring/legacy/monitoring/prometheus/_index.md b/content/rancher/v2.5/en/monitoring/legacy/monitoring/prometheus/_index.md deleted file mode 100644 index 9325c12a9eb..00000000000 --- a/content/rancher/v2.5/en/monitoring/legacy/monitoring/prometheus/_index.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Prometheus Configuration -weight: 1 ---- - -While configuring monitoring at either the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring) or [project level]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), there are multiple options that can be configured. - -Option | Description --------|------------- -Data Retention | How long your Prometheus instance retains monitoring data scraped from Rancher objects before it's purged. -[Enable Node Exporter](#node-exporter) | Whether or not to deploy the node exporter. -Node Exporter Host Port | The host port on which data is exposed, i.e. data that Prometheus collects from your node hardware. Required if you have enabled the node exporter. -[Enable Persistent Storage](#persistent-storage) for Prometheus | Whether or not to configure storage for Prometheus so that metrics can be retained even if the Prometheus pod fails. -[Enable Persistent Storage](#persistent-storage) for Grafana | Whether or not to configure storage for Grafana so that the Grafana dashboards and configuration can be retained even if the Grafana pod fails. -Prometheus [CPU Limit](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu) | CPU resource limit for the Prometheus pod. 
-Prometheus [CPU Reservation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu) | CPU reservation for the Prometheus pod. -Prometheus [Memory Limit](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) | Memory resource limit for the Prometheus pod. -Prometheus [Memory Reservation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) | Memory resource requests for the Prometheus pod. -Selector | Ability to select the nodes in which Prometheus and Grafana pods are deployed to. To use this option, the nodes must have labels. -Advanced Options | Since monitoring is an [application](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) from the [Rancher catalog]({{}}/rancher/v2.x/en/catalog/), it can be [configured like other catalog application]({{}}/rancher/v2.x/en/catalog/apps/#configuration-options). _Warning: Any modification to the application without understanding the entire application can lead to catastrophic errors._ - -## Node Exporter - -The [node exporter](https://github.com/prometheus/node_exporter/blob/master/README.md) is a popular open source exporter, which exposes the metrics for hardware and \*NIX kernels OS. It is designed to monitor the host system. However, there are still issues with namespaces when running it in a container, mostly around filesystem mount spaces. In order to monitor actual network metrics for the container network, the node exporter must be deployed with the `hostNetwork` mode. - -When configuring Prometheus and enabling the node exporter, enter a host port in the **Node Exporter Host Port** that will not produce port conflicts with existing applications. The host port chosen must be open to allow internal traffic between Prometheus and the Node Exporter. 
- ->**Warning:** In order for Prometheus to collect the metrics of the node exporter, after enabling cluster monitoring, you must open the Node Exporter Host Port in the host firewall rules to allow intranet access. By default, `9796` is used as that host port. - -## Persistent Storage - ->**Prerequisite:** Configure one or more [storage class]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/#adding-storage-classes) to use as [persistent storage]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) for your Prometheus or Grafana pod. - -By default, when you enable Prometheus for either a cluster or project, all monitoring data that Prometheus collects is stored on its own pod. With local storage, if the Prometheus or Grafana pods fail, all the data is lost. Rancher recommends configuring an external persistent storage to the cluster. With the external persistent storage, if the Prometheus or Grafana pods fail, the new pods can recover using data from the persistent storage. - -When enabling persistent storage for Prometheus or Grafana, specify the size of the persistent volume and select the [storage class]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/#storage-classes). - -## Remote Storage - ->**Prerequisite:** Need a remote storage endpoint to be available. 
The possible list of integrations is available [here](https://prometheus.io/docs/operating/integrations/) - -Using advanced options, remote storage integration for the Prometheus installation can be configured as follows: - -``` -prometheus.remoteWrite[0].url = http://remote1/push -prometheus.remoteWrite[0].remoteTimeout = 33s - -prometheus.remoteWrite[1].url = http://remote2/push - - -prometheus.remoteRead[0].url = http://remote1/read -prometheus.remoteRead[0].proxyUrl = http://proxy.url -prometheus.remoteRead[0].bearerToken = token-value - -prometheus.remoteRead[1].url = http://remote2/read -prometheus.remoteRead[1].remoteTimeout = 33s -prometheus.remoteRead[1].readRecent = true -``` - -Additional fields can be set up based on the [ReadSpec](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotereadspec) and [RemoteWriteSpec](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotewritespec) diff --git a/content/rancher/v2.5/en/monitoring/legacy/monitoring/viewing-metrics/_index.md b/content/rancher/v2.5/en/monitoring/legacy/monitoring/viewing-metrics/_index.md deleted file mode 100644 index 412ead2d7b5..00000000000 --- a/content/rancher/v2.5/en/monitoring/legacy/monitoring/viewing-metrics/_index.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: Viewing Metrics -weight: 2 --- - -After you've enabled monitoring at either the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring) or [project level]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), you will want to start viewing the data being collected. There are multiple ways to view this data. - -## Rancher Dashboard - ->**Note:** This is only available if you've enabled monitoring at the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring). Project specific analytics must be viewed using the project's Grafana instance.
- -Rancher's dashboards are available at multiple locations: - -- **Cluster Dashboard**: From the **Global** view, navigate to the cluster. -- **Node Metrics**: From the **Global** view, navigate to the cluster. Select **Nodes**. Find the individual node and click on its name. Click **Node Metrics.** -- **Workload Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Click **Workload Metrics.** -- **Pod Metrics**: From the **Global** view, navigate to the project. Select **Workloads > Workloads**. Find the individual workload and click on its name. Find the individual pod and click on its name. Click **Pod Metrics.** -- **Container Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Find the individual pod and click on its name. Find the individual container and click on its name. Click **Container Metrics.** - -Prometheus metrics are displayed and are denoted with the Grafana icon. If you click on the icon, the metrics will open a new tab in Grafana. - -Within each Prometheus metrics widget, there are several ways to customize your view. - -- Toggle between two views: - - **Detail**: Displays graphs and charts that let you view each event in a Prometheus time series - - **Summary** Displays events in a Prometheus time series that are outside the norm. -- Change the range of the time series that you're viewing to see a more refined or expansive data sample. -- Customize the data sample to display data between specific dates and times. - -When analyzing these metrics, don't be concerned about any single standalone metric in the charts and graphs. 
Rather, you should establish a baseline for your metrics over the course of time, e.g. the range of values that your components usually operate within and are considered normal. After you establish the baseline, be on the lookout for any large deltas in the charts and graphs, as these big changes usually indicate a problem that you need to investigate. - -## Grafana - -If you've enabled monitoring at either the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#enabling-cluster-monitoring) or [project level]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), Rancher automatically creates a link to Grafana instance. Use this link to view monitoring data. - -Grafana allows you to query, visualize, alert, and ultimately, understand your cluster and workload data. For more information on Grafana and its capabilities, visit the [Grafana website](https://grafana.com/grafana). - -### Authentication - -Rancher determines which users can access the new Grafana instance, as well as the objects they can view within it, by validating them against the user's [cluster or project roles]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/). In other words, a user's access in Grafana mirrors their access in Rancher. - -When you go to the Grafana instance, you will be logged in with the username `admin` and the password `admin`. If you log out and log in again, you will be prompted to change your password. You will only have access to the URL of the Grafana instance if you have access to view the corresponding metrics in Rancher. So for example, if your Rancher permissions are scoped to the project level, you won't be able to see the Grafana instance for cluster-level metrics. - -### Accessing the Cluster-level Grafana Instance - -1. From the **Global** view, navigate to a cluster that has monitoring enabled. - -1. Go to the **System** project view. This project is where the cluster-level Grafana instance runs. - -1. 
Click **Apps.** In versions prior to v2.2.0, choose **Catalog Apps** on the main navigation bar. - -1. Go to the `cluster-monitoring` application. - -1. In the `cluster-monitoring` application, there are two `/index.html` links: one that leads to a Grafana instance and one that leads to a Prometheus instance. When you click the Grafana link, it will redirect you to a new webpage for Grafana, which shows metrics for the cluster. - -1. You will be signed in to the Grafana instance automatically. The default username is `admin` and the default password is `admin`. For security, we recommend that you log out of Grafana, log back in with the `admin` password, and change your password. - -**Results:** You are logged into Grafana from the Grafana instance. After logging in, you can view the preset Grafana dashboards, which are imported via the [Grafana provisioning mechanism](http://docs.grafana.org/administration/provisioning/#dashboards), so you cannot modify them directly. For now, if you want to configure your own dashboards, clone the original and modify the new copy. diff --git a/content/rancher/v2.5/en/monitoring/legacy/notifiers/_index.md b/content/rancher/v2.5/en/monitoring/legacy/notifiers/_index.md deleted file mode 100644 index e4f4ea96403..00000000000 --- a/content/rancher/v2.5/en/monitoring/legacy/notifiers/_index.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: Notifiers -weight: 11 ---- - -Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. - -Rancher integrates with a variety of popular IT services, including: - -- **Slack**: Send alert notifications to your Slack channels. -- **Email**: Choose email recipients for alert notifications. -- **PagerDuty**: Route notifications to staff by phone, SMS, or personal email. -- **WebHooks**: Update a webpage with alert notifications. -- **WeChat**: Send alert notifications to your Enterprise WeChat contacts. 
- -This section covers the following topics: - -- [Roles-based access control for notifiers](#roles-based-access-control-for-notifiers) -- [Adding notifiers](#adding-notifiers) -- [Managing notifiers](#managing-notifiers) -- [Example payload for a webhook alert notifier](#example-payload-for-a-webhook-alert-notifier) - -### Roles-based Access Control for Notifiers - -Notifiers are configured at the cluster level. This model ensures that only cluster owners need to configure notifiers, leaving project owners to simply configure alerts in the scope of their projects. You don't need to dispense privileges like SMTP server access or cloud account access. - -### Adding Notifiers - -Set up a notifier so that you can begin configuring and sending alerts. - -1. From the **Global View**, open the cluster that you want to add a notifier. - -1. From the main menu, select **Tools > Notifiers**. Then click **Add Notifier**. - -1. Select the service you want to use as your notifier, and then fill out the form. -{{% accordion id="slack" label="Slack" %}} -1. Enter a **Name** for the notifier. -1. From Slack, create a webhook. For instructions, see the [Slack Documentation](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack). -1. From Rancher, enter your Slack webhook **URL**. -1. Enter the name of the channel that you want to send alert notifications in the following format: `#`. - - Both public and private channels are supported. -1. Click **Test**. If the test is successful, the Slack channel you're configuring for the notifier outputs `Slack setting validated`. -{{% /accordion %}} -{{% accordion id="email" label="Email" %}} -1. Enter a **Name** for the notifier. -1. In the **Sender** field, enter an email address available on your mail server that you want to send the notification. -1. In the **Host** field, enter the IP address or hostname for your SMTP server. Example: `smtp.email.com` -1. In the **Port** field, enter the port used for email. 
Typically, TLS uses `587` and SSL uses `465`. If you're using TLS, make sure **Use TLS** is selected. -1. Enter a **Username** and **Password** that authenticate with the SMTP server. -1. In the **Default Recipient** field, enter the email address that you want to receive the notification. -1. Click **Test**. If the test is successful, Rancher prints `settings validated` and you receive a test notification email. -{{% /accordion %}} -{{% accordion id="pagerduty" label="PagerDuty" %}} -1. Enter a **Name** for the notifier. -1. From PagerDuty, create a Prometheus integration. For instructions, see the [PagerDuty Documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/). -1. From PagerDuty, copy the integration's **Integration Key**. -1. From Rancher, enter the key in the **Service Key** field. -1. Click **Test**. If the test is successful, your PagerDuty endpoint outputs `PagerDuty setting validated`. -{{% /accordion %}} -{{% accordion id="webhook" label="WebHook" %}} -1. Enter a **Name** for the notifier. -1. Using the app of your choice, create a webhook URL. -1. Enter your webhook **URL**. -1. Click **Test**. If the test is successful, the URL you're configuring as a notifier outputs `Webhook setting validated`. -{{% /accordion %}} -{{% accordion id="WeChat" label="WeChat" %}} - -1. Enter a **Name** for the notifier. -1. In the **Corporation ID** field, enter the "EnterpriseID" of your corporation, you could get it from [Profile page](https://work.weixin.qq.com/wework_admin/frame#profile). -1. From Enterprise WeChat, create an application in the [Application page](https://work.weixin.qq.com/wework_admin/frame#apps), and then enter the "AgentId" and "Secret" of this application to the **Application Agent ID** and **Application Secret** fields. -1. Select the **Recipient Type** and then enter a corresponding id to **Default Recipient** field, for example, the party id, tag id or user account that you want to receive the notification. 
You could get contact information from [Contacts page](https://work.weixin.qq.com/wework_admin/frame#contacts). -{{% /accordion %}} - -1. Select **Enable** for **Send Resolved Alerts** if you wish to notify about resolved alerts. -1. Click **Add** to complete adding the notifier. - -**Result:** Your notifier is added to Rancher. - - -### Managing Notifiers - -After you set up notifiers, you can manage them. From the **Global** view, open the cluster that you want to manage your notifiers. Select **Tools > Notifiers**. You can: - -- **Edit** their settings that you configured during their initial setup. -- **Clone** them, to quickly setup slightly different notifiers. -- **Delete** them when they're no longer necessary. - -### Example Payload for a Webhook Alert Notifier - -```json -{ - "receiver": "c-2a3bc:kube-components-alert", - "status": "firing", - "alerts": [ - { - "status": "firing", - "labels": { - "alert_name": "Scheduler is unavailable", - "alert_type": "systemService", - "cluster_name": "mycluster (ID: c-2a3bc)", - "component_name": "scheduler", - "group_id": "c-2a3bc:kube-components-alert", - "logs": "Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused", - "rule_id": "c-2a3bc:kube-components-alert_scheduler-system-service", - "severity": "critical" - }, - "annotations": {}, - "startsAt": "2020-01-30T19:18:13.321684733Z", - "endsAt": "0001-01-01T00:00:00Z", - "generatorURL": "" - } - ], - "groupLabels": { - "component_name": "scheduler", - "rule_id": "c-2a3bc:kube-components-alert_scheduler-system-service" - }, - "commonLabels": { - "alert_name": "Scheduler is unavailable", - "alert_type": "systemService", - "cluster_name": "mycluster (ID: c-2a3bc)" - } -} -``` -### What's Next? - -After creating a notifier, set up alerts to receive notifications of Rancher system events. 
- -- [Cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can set up alerts at the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/). -- [Project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can set up alerts at the [project level]({{}}/rancher/v2.x/en/project-admin/tools/alerts/). diff --git a/content/rancher/v2.5/en/opa-gatekeper/_index.md b/content/rancher/v2.5/en/opa-gatekeper/_index.md deleted file mode 100644 index cd78f7c4a1d..00000000000 --- a/content/rancher/v2.5/en/opa-gatekeper/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: OPA Gatekeeper -weight: 12 ---- - -> This page is under construction. diff --git a/content/rancher/v2.5/en/opa-gatekeper/legacy/_index.md b/content/rancher/v2.5/en/opa-gatekeper/legacy/_index.md deleted file mode 100644 index 55103e52eb9..00000000000 --- a/content/rancher/v2.5/en/opa-gatekeper/legacy/_index.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Legacy UI Docs -weight: 2 ---- - -To ensure consistency and compliance, every organization needs the ability to define and enforce policies in its environment in an automated way. OPA [https://www.openpolicyagent.org/] (Open Policy Agent) is a policy engine that facilitates policy-based control for cloud native environments. Rancher provides the ability to enable OPA Gatekeeper in Kubernetes clusters, and also installs a couple of built-in policy definitions, which are also called constraint templates. - -OPA provides a high-level declarative language that lets you specify policy as code and ability to extend simple APIs to offload policy decision-making. - -[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is a project that provides integration between OPA and Kubernetes. OPA Gatekeeper provides: - -- An extensible, parameterized policy library. -- Native Kubernetes CRDs for instantiating the policy library, also called “constraints." 
-- Native Kubernetes CRDs for extending the policy library, also called "constraint templates." -- Audit functionality. - -To read more about OPA, please refer to the [official documentation.](https://www.openpolicyagent.org/docs/latest/) - -# How the OPA Gatekeeper Integration Works - -Kubernetes provides the ability to extend API server functionality via admission controller webhooks, which are invoked whenever a resource is created, updated or deleted. Gatekeeper is installed as a validating webhook and enforces policies defined by Kubernetes custom resource definitions. In addition to the admission control usage, Gatekeeper provides the capability to audit existing resources in Kubernetes clusters and mark current violations of enabled policies. - -OPA Gatekeeper is made available via Rancher's Helm system chart, and it is installed in a namespace named `gatekeeper-system`. - -# Enabling OPA Gatekeeper in a Cluster - -> **Prerequisites:** -> -> - Only administrators and cluster owners can enable OPA Gatekeeper. -> - The dashboard needs to be enabled using the `dashboard` feature flag. For more information, refer to the [section on enabling experimental features.]({{}}/rancher/v2.x/en/installation/options/feature-flags/) - -1. Navigate to the cluster's **Dashboard** view. -1. On the left side menu, expand the cluster menu and click on **OPA Gatekeeper.** -1. To install Gatekeeper with the default configuration, click on **Enable Gatekeeper (v0.1.0) with defaults.** -1. To change any default configuration, click on **Customize Gatekeeper yaml configuration.** - -# Constraint Templates - -[Constraint templates](https://github.com/open-policy-agent/gatekeeper#constraint-templates) are Kubernetes custom resources that define the schema and Rego logic of the OPA policy to be applied by Gatekeeper. 
For more information on the Rego policy language, refer to the [official documentation.](https://www.openpolicyagent.org/docs/latest/policy-language/) - -When OPA Gatekeeper is enabled, Rancher installs some templates by default. - -To list the constraint templates installed in the cluster, go to the left side menu under OPA Gatekeeper and click on **Templates.** - -Rancher also provides the ability to create your own constraint templates by importing YAML definitions. - -# Creating and Configuring Constraints - -[Constraints](https://github.com/open-policy-agent/gatekeeper#constraints) are Kubernetes custom resources that define the scope of objects to which a specific constraint template applies. The complete policy is defined by constraint templates and constraints together. - -> **Prerequisites:** OPA Gatekeeper must be enabled in the cluster. - -To list the constraints installed, go to the left side menu under OPA Gatekeeper, and click on **Constraints.** - -New constraints can be created from a constraint template. - -Rancher provides the ability to create a constraint by using a convenient form that lets you input the various constraint fields. - -The **Edit as yaml** option is also available to configure the constraint's yaml definition. - -### Exempting Rancher's System Namespaces from Constraints - -When a constraint is created, ensure that it does not apply to any Rancher or Kubernetes system namespaces. If the system namespaces are not excluded, then it is possible to see many resources under them marked as violations of the constraint. - -To limit the scope of the constraint only to user namespaces, always specify these namespaces under the **Match** field of the constraint. - -Also, the constraint may interfere with other Rancher functionality and deny system workloads from being deployed. To avoid this, exclude all Rancher-specific namespaces from your constraints. 
- -# Enforcing Constraints in your Cluster - -When the **Enforcement Action** is **Deny,** the constraint is immediately enabled and will deny any requests that violate the policy defined. By default, the enforcement value is **Deny.** - -When the **Enforcement Action** is **Dryrun,** then any resources that violate the policy are only recorded under the constraint's status field. - -To enforce constraints, create a constraint using the form. In the **Enforcement Action** field, choose **Deny.** - -# Audit and Violations in your Cluster - -OPA Gatekeeper runs a periodic audit to check if any existing resource violates any enforced constraint. The audit-interval (default 300s) can be configured while installing Gatekeeper. - -On the Gatekeeper page, any violations of the defined constraints are listed. - -Also under **Constraints,** the number of violations of the constraint can be found. - -The detail view of each constraint lists information about the resource that violated the constraint. - -# Disabling Gatekeeper - -1. Navigate to the cluster's Dashboard view -1. On the left side menu, expand the cluster menu and click on **OPA Gatekeeper.** -1. Click the **⋮ > Disable**. - -**Result:** Upon disabling OPA Gatekeeper, all constraint templates and constraints will also be deleted. - diff --git a/content/rancher/v2.5/en/overview/_index.md b/content/rancher/v2.5/en/overview/_index.md deleted file mode 100644 index 5a40167303b..00000000000 --- a/content/rancher/v2.5/en/overview/_index.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Overview -weight: 1 ---- - -> This page is under construction. - -Rancher is a container management platform built for organizations that deploy containers in production. Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. - -# Run Kubernetes Everywhere - -Kubernetes has become the container orchestration standard. Most cloud and virtualization vendors now offer it as standard infrastructure. 
Rancher users have the choice of creating Kubernetes clusters with Rancher Kubernetes Engine (RKE) or cloud Kubernetes services, such as GKE, AKS, and EKS. Rancher users can also import and manage their existing Kubernetes clusters created using any Kubernetes distribution or installer. - -# Meet IT requirements - -Rancher supports centralized authentication, access control, and monitoring for all Kubernetes clusters under its control. For example, you can: - -- Use your Active Directory credentials to access Kubernetes clusters hosted by cloud vendors, such as GKE. -- Setup and enforce access control and security policies across all users, groups, projects, clusters, and clouds. -- View the health and capacity of your Kubernetes clusters from a single-pane-of-glass. - -# Empower DevOps Teams - -Rancher provides an intuitive user interface for DevOps engineers to manage their application workload. The user does not need to have in-depth knowledge of Kubernetes concepts to start using Rancher. Rancher catalog contains a set of useful DevOps tools. Rancher is certified with a wide selection of cloud native ecosystem products, including, for example, security tools, monitoring systems, container registries, and storage and networking drivers. - -The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. - -![Platform]({{}}/img/rancher/platform.png) - -# Features of the Rancher API Server - -The Rancher API server is built on top of an embedded Kubernetes API server and an etcd database. 
It implements the following functionalities: - -### Authorization and Role-Based Access Control - -- **User management:** The Rancher API server [manages user identities]({{}}/rancher/v2.x/en/admin-settings/authentication/) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. -- **Authorization:** The Rancher API server manages [access control]({{}}/rancher/v2.x/en/admin-settings/rbac/) and [security]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/) policies. - -### Working with Kubernetes - -- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/) on existing nodes, or perform [Kubernetes upgrades.]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes) -- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts]({{}}/rancher/v2.x/en/catalog/) that make it easy to repeatedly deploy applications. -- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. A project is a Rancher concept, not a Kubernetes concept, which allows you manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration]({{}}/rancher/v2.x/en/project-admin/) and for [managing applications within projects.]({{}}/rancher/v2.x/en/k8s-in-rancher/) -- **Pipelines:** Setting up a [pipeline]({{}}/rancher/v2.x/en/project-admin/pipelines/) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. -- **Istio:** Our [integration with Istio]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. 
Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. - -### Working with Cloud Infrastructure - -- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes]({{}}/rancher/v2.x/en/cluster-admin/nodes/) in all clusters. -- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) and [persistent storage]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) in the cloud. - -### Cluster Visibility - -- **Logging:** Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. Logging can be set up [at the cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) or [at the project level.]({{}}/rancher/v2.x/en/project-admin/tools/logging/) -- **Monitoring:** Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution. Monitoring can be configured [at the cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) or [at the project level.]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/) -- **Alerting:** To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. To help you stay informed of these events, you can configure alerts [at the cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) or [at the project level.]({{}}/rancher/v2.x/en/project-admin/tools/alerts/) - -# Editing Downstream Clusters with Rancher - -The options and settings available for an existing cluster change based on the method that you used to provision it. 
For example, only clusters [provisioned by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. - -After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table" %}} diff --git a/content/rancher/v2.5/en/overview/architecture/_index.md b/content/rancher/v2.5/en/overview/architecture/_index.md deleted file mode 100644 index 8087bcc74a4..00000000000 --- a/content/rancher/v2.5/en/overview/architecture/_index.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: Architecture -weight: 1 ---- - -> This page is under construction. - -This section focuses on the Rancher server, its components, and how Rancher communicates with downstream Kubernetes clusters. - -> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.x/en/overview/concepts) page. 
- -This section covers the following topics: - -- [Components](#components) -- [Installation](#installation) -- [Features](#features) -- [Infrastructure](#infrastructure) -- [Architecture of the Enterprise Cluster Manager](#architecture-of-the-enterprise-cluster-manager) -- [Communicating with downstream user clusters](#communicating-with-downstream-user-clusters) - - [The authentication proxy](#1-the-authentication-proxy) - - [Cluster controllers and cluster agents](#2-cluster-controllers-and-cluster-agents) - - [Node agents](#3-node-agents) - - [Authorized cluster endpoint](#4-authorized-cluster-endpoint) -- [Important files](#important-files) -- [Tools for provisioning Kubernetes clusters](#tools-for-provisioning-kubernetes-clusters) -- [Rancher server components and source code](#rancher-server-components-and-source-code) - -# Components - -Rancher comes with two main components: the Rancher Helm chart, which is used for installing Rancher on a Kubernetes cluster, and the Rancher CLI. The CLI allows you to easily provision a Kubernetes cluster if you don't already have a cluster to install Rancher. - - -# Installation - -There are two main ways that Rancher can be installed: - -1. You can use Helm to install the Rancher Helm chart on any Kubernetes cluster. -2. You can use the Rancher CLI to install a Rancher Kubernetes cluster. This cluster comes with the Rancher Helm chart built in. - -The installation path that you choose will affect the way that you upgrade Rancher, but not the way that Rancher is backed up and restored. - -# Features - -Rancher 2.5 allows many optional features to be enabled, including logging, monitoring, Istio, and the Enterprise Cluster Manager. 
- -For a list of main features of the Rancher API server, refer to the [overview section.]({{}}/rancher/v2.x/en/overview/#features-of-the-rancher-api-server) - -# Infrastructure - -For guidance about setting up the underlying infrastructure for the Rancher server, refer to the [architecture recommendations.]({{}}/rancher/v2.x/en/overview/architecture-recommendations) - -# Architecture of the Enterprise Cluster Manager - -The Enterprise Cluster Manager is an optional feature in Rancher. When enabled, it allows Rancher to centralize the authentication and management of multiple Kubernetes clusters. - -The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server includes all the software components used to manage the entire Rancher deployment. - -The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two downstream Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). - -For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. - -The diagram below shows how users can manipulate both [Rancher-launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters and [hosted Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) clusters through Rancher's authentication proxy: - -
Managing Kubernetes Clusters through Rancher's Authentication Proxy
- -![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) - -You can install Rancher on a single node, or on a high-availability Kubernetes cluster. - -A high-availability Kubernetes installation is recommended for production. A Docker installation may be used for development and testing purposes, but there is no migration path from a single-node to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. - -The Rancher server, regardless of the installation method, should always run on nodes that are separate from the downstream user clusters that it manages. If Rancher is installed on a high-availability Kubernetes cluster, it should run on a separate cluster from the cluster(s) it manages. - -# Communicating with Downstream User Clusters - -This section describes how Rancher provisions and manages the downstream user clusters that run your apps and services. - -The below diagram shows how the cluster controllers, cluster agents, and node agents allow Rancher to control downstream clusters. - -
Communicating with Downstream Clusters
- -![Rancher Components]({{}}/img/rancher/rancher-architecture-cluster-controller.svg) - -The following descriptions correspond to the numbers in the diagram above: - -1. [The Authentication Proxy](#1-the-authentication-proxy) -2. [Cluster Controllers and Cluster Agents](#2-cluster-controllers-and-cluster-agents) -3. [Node Agents](#3-node-agents) -4. [Authorized Cluster Endpoint](#4-authorized-cluster-endpoint) - -### 1. The Authentication Proxy - -In this diagram, a user named Bob wants to see all pods running on a downstream user cluster called User Cluster 1. From within Rancher, he can run a `kubectl` command to see -the pods. Bob is authenticated through Rancher's authentication proxy. - -The authentication proxy forwards all Kubernetes API calls to downstream clusters. It integrates with authentication services like local authentication, Active Directory, and GitHub. On every Kubernetes API call, the authentication proxy authenticates the caller and sets the proper Kubernetes impersonation headers before forwarding the call to Kubernetes masters. - -Rancher communicates with Kubernetes clusters using a [service account,](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) which provides an identity for processes that run in a pod. - -By default, Rancher generates a [kubeconfig file]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) that contains credentials for proxying through the Rancher server to connect to the Kubernetes API server on a downstream user cluster. The kubeconfig file (`kube_config_rancher-cluster.yml`) contains full access to the cluster. - -### 2. Cluster Controllers and Cluster Agents - -Each downstream user cluster has a cluster agent, which opens a tunnel to the corresponding cluster controller within the Rancher server. - -There is one cluster controller and one cluster agent for each downstream cluster. 
Each cluster controller: - -- Watches for resource changes in the downstream cluster -- Brings the current state of the downstream cluster to the desired state -- Configures access control policies to clusters and projects -- Provisions clusters by calling the required Docker machine drivers and Kubernetes engines, such as RKE and GKE - -By default, to enable Rancher to communicate with a downstream cluster, the cluster controller connects to the cluster agent. If the cluster agent is not available, the cluster controller can connect to a [node agent](#3-node-agents) instead. - -The cluster agent, also called `cattle-cluster-agent`, is a component that runs in a downstream user cluster. It performs the following tasks: - -- Connects to the Kubernetes API of Rancher-launched Kubernetes clusters -- Manages workloads, pod creation and deployment within each cluster -- Applies the roles and bindings defined in each cluster's global policies -- Communicates between the cluster and Rancher server (through a tunnel to the cluster controller) about events, stats, node info, and health - -### 3. Node Agents - -If the cluster agent (also called `cattle-cluster-agent`) is not available, one of the node agents creates a tunnel to the cluster controller to communicate with Rancher. - -The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) resource to make sure it runs on every node in a Rancher-launched Kubernetes cluster. It is used to interact with the nodes when performing cluster operations. Examples of cluster operations include upgrading the Kubernetes version and creating or restoring etcd snapshots. - -### 4. Authorized Cluster Endpoint - -An authorized cluster endpoint allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. 
- -> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters) to provision the cluster. It is not available for imported clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. - -There are two main reasons why a user might need the authorized cluster endpoint: - -- To access a downstream user cluster while Rancher is down -- To reduce latency in situations where the Rancher server and downstream cluster are separated by a long distance - -The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the authorized cluster endpoint. When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. - -Like the authorized cluster endpoint, the `kube-api-auth` authentication service is also only available for Rancher-launched Kubernetes clusters. - -> **Example scenario:** Let's say that the Rancher server is located in the United States, and User Cluster 1 is located in Australia. A user, Alice, also lives in Australia. Alice can manipulate resources in User Cluster 1 by using the Rancher UI, but her requests will have to be sent from Australia to the Rancher server in the United States, then be proxied back to Australia, where the downstream user cluster is. The geographical distance may cause significant latency, which Alice can reduce by using the authorized cluster endpoint. - -With this endpoint enabled for the downstream cluster, Rancher generates an extra Kubernetes context in the kubeconfig file in order to connect directly to the cluster. This file has the credentials for `kubectl` and `helm`. - -You will need to use a context defined in this kubeconfig file to access the cluster if Rancher goes down. 
Therefore, we recommend exporting the kubeconfig file so that if Rancher goes down, you can still use the credentials in the file to access your cluster. For more information, refer to the section on accessing your cluster with [kubectl and the kubeconfig file.]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl) - -# Important Files - -The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The Kubeconfig file for the cluster, this file contains credentials for full access to the cluster. You can use this file to authenticate with a Rancher-launched Kubernetes cluster if Rancher goes down. -- `rancher-cluster.rkestate`: The Kubernetes cluster state file. This file contains credentials for full access to the cluster. Note: This state file is only created when using RKE v0.2.0 or higher. - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -For more information on connecting to a cluster without the Rancher authentication proxy and other configuration options, refer to the [kubeconfig file]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) documentation. - -# Tools for Provisioning Kubernetes Clusters - -The tools that Rancher uses to provision downstream user clusters depends on the type of cluster that is being provisioned. - -### Rancher Launched Kubernetes for Nodes Hosted in an Infrastructure Provider - -Rancher can dynamically provision nodes in a provider such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. 
- -Rancher provisions this type of cluster using [RKE](https://github.com/rancher/rke) and [docker-machine.](https://github.com/rancher/machine) - -### Rancher Launched Kubernetes for Custom Nodes - -When setting up this type of cluster, Rancher installs Kubernetes on existing nodes, which creates a custom cluster. - -Rancher provisions this type of cluster using [RKE.](https://github.com/rancher/rke) - -### Hosted Kubernetes Providers - -When setting up this type of cluster, Kubernetes is installed by providers such as Google Kubernetes Engine, Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -Rancher provisions this type of cluster using [kontainer-engine.](https://github.com/rancher/kontainer-engine) - -### Imported Kubernetes Clusters - -In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. - -# Rancher Server Components and Source Code - -This diagram shows each component that the Rancher server is composed of: - -![Rancher Components]({{}}/img/rancher/rancher-architecture-rancher-components.svg) - -The GitHub repositories for Rancher can be found at the following links: - -- [Main Rancher server repository](https://github.com/rancher/rancher) -- [Rancher UI](https://github.com/rancher/ui) -- [Rancher API UI](https://github.com/rancher/api-ui) -- [Norman,](https://github.com/rancher/norman) Rancher's API framework -- [Types](https://github.com/rancher/types) -- [Rancher CLI](https://github.com/rancher/cli) -- [Catalog applications](https://github.com/rancher/helm) - -This is a partial list of the most important Rancher repositories. 
For more details about Rancher source code, refer to the section on [contributing to Rancher.]({{}}/rancher/v2.x/en/contributing/#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. diff --git a/content/rancher/v2.5/en/overview/concepts/_index.md b/content/rancher/v2.5/en/overview/concepts/_index.md deleted file mode 100644 index f4fe9fc26f0..00000000000 --- a/content/rancher/v2.5/en/overview/concepts/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Kubernetes Concepts -weight: 4 ---- - -This page explains concepts related to Kubernetes that are important for understanding how Rancher works. The descriptions below provide a simplified overview of Kubernetes components. For more details, refer to the [official documentation on Kubernetes components.](https://kubernetes.io/docs/concepts/overview/components/) - -This section covers the following topics: - -- [About Docker](#about-docker) -- [About Kubernetes](#about-kubernetes) -- [What is a Kubernetes Cluster?](#what-is-a-kubernetes-cluster) -- [Roles for Nodes in Kubernetes Clusters](#roles-for-nodes-in-kubernetes-clusters) - - [etcd Nodes](#etcd-nodes) - - [Controlplane Nodes](#controlplane-nodes) - - [Worker Nodes](#worker-nodes) -- [About Helm](#about-helm) - -# About Docker - -Docker is the container packaging and runtime standard. Developers build container images from Dockerfiles and distribute container images from Docker registries. [Docker Hub](https://hub.docker.com) is the most popular public registry. Many organizations also set up private Docker registries. Docker is primarily used to manage containers on individual nodes. - ->**Note:** Although Rancher 1.6 supported Docker Swarm clustering technology, it is no longer supported in Rancher 2.x due to the success of Kubernetes. - -# About Kubernetes - -Kubernetes is the container cluster management standard. 
YAML files specify containers and other resources that form an application. Kubernetes performs functions such as scheduling, scaling, service discovery, health check, secret management, and configuration management. - -# What is a Kubernetes Cluster? - -A cluster is a group of computers that work together as a single system. - -A _Kubernetes Cluster_ is a cluster that uses the [Kubernetes container-orchestration system](https://kubernetes.io/) to deploy, maintain, and scale Docker containers, allowing your organization to automate application operations. - -# Roles for Nodes in Kubernetes Clusters - -Each computing resource in a Kubernetes cluster is called a _node_. Nodes can be either bare-metal servers or virtual machines. Kubernetes classifies nodes into three types: _etcd_ nodes, _control plane_ nodes, and _worker_ nodes. - -A Kubernetes cluster consists of at least one etcd, controlplane, and worker node. - -### etcd Nodes - -Rancher uses etcd as a data store in both single node and high-availability installations. In Kubernetes, etcd is also a role for nodes that store the cluster state. - -The state of a Kubernetes cluster is maintained in [etcd.](https://kubernetes.io/docs/concepts/overview/components/#etcd) The etcd nodes run the etcd database. - -The etcd database component is a distributed key-value store used as Kubernetes storage for all cluster data, such as cluster coordination and state management. It is recommended to run etcd on multiple nodes so that there's always a backup available for failover. - -Although you can run etcd on just one node, etcd requires a majority of nodes, a quorum, to agree on updates to the cluster state. The cluster should always contain enough healthy etcd nodes to form a quorum. For a cluster with n members, a quorum is (n/2)+1. For any odd-sized cluster, adding one node will always increase the number of nodes necessary for a quorum. 
- -Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. - -### Controlplane Nodes - -Controlplane nodes run the Kubernetes API server, scheduler, and controller manager. These nodes take care of routine tasks to ensure that your cluster maintains your configuration. Because all cluster data is stored on your etcd nodes, control plane nodes are stateless. You can run control plane on a single node, although two or more nodes are recommended for redundancy. Additionally, a single node can share the control plane and etcd roles. - -### Worker Nodes - -Each [worker node](https://kubernetes.io/docs/concepts/architecture/nodes/) runs the following: - -- **Kubelets:** An agent that monitors the state of the node, ensuring your containers are healthy. -- **Workloads:** The containers and pods that hold your apps, as well as other types of deployments. - -Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/). - -# About Helm - -For high-availability installations of Rancher, Helm is the tool used to install Rancher on a Kubernetes cluster. - -Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh). 
- -For more information on service accounts and cluster role binding, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) \ No newline at end of file diff --git a/content/rancher/v2.5/en/security/_index.md b/content/rancher/v2.5/en/security/_index.md deleted file mode 100644 index d93f5e2d328..00000000000 --- a/content/rancher/v2.5/en/security/_index.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Security -weight: 15 ---- - - - - - - - -
-

Security policy

-

Rancher Labs supports responsible disclosure, and endeavours to resolve all issues in a reasonable time frame.

-
-

Reporting process

-

Please submit possible security issues by emailing security@rancher.com

-
-

Announcements

-

Subscribe to the Rancher announcements forum for release updates.

-
- -Security is at the heart of all Rancher features. From integrating with all the popular authentication tools and services, to an enterprise grade [RBAC capability,]({{}}/rancher/v2.x/en/admin-settings/rbac) Rancher makes your Kubernetes clusters even more secure. - -On this page, we provide security-related documentation along with resources to help you secure your Rancher installation and your downstream Kubernetes clusters: - -- [Running a CIS security scan on a Kubernetes cluster](#running-a-cis-security-scan-on-a-kubernetes-cluster) -- [Guide to hardening Rancher installations](#rancher-hardening-guide) -- [The CIS Benchmark and self-assessment](#the-cis-benchmark-and-self-assessment) -- [Third-party penetration test reports](#third-party-penetration-test-reports) -- [Rancher CVEs and resolutions](#rancher-cves-and-resolutions) -- [Security Tips and Best Practices](#security-tips-and-best-practices) - -### Running a CIS Security Scan on a Kubernetes Cluster - -Rancher leverages [kube-bench](https://github.com/aquasecurity/kube-bench) to run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS (Center for Internet Security) Kubernetes Benchmark. - -The CIS Kubernetes Benchmark is a reference document that can be used to establish a secure configuration baseline for Kubernetes. - -The Center for Internet Security (CIS) is a 501(c)(3) nonprofit organization, formed in October 2000, with a mission is to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace." - -CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. 
 - -The Benchmark provides recommendations of two types: Scored and Not Scored. We run tests related to only Scored recommendations. - -When Rancher runs a CIS security scan on a cluster, it generates a report showing the results of each test, including a summary with the number of passed, skipped and failed tests. The report also includes remediation steps for any failed tests. - -For details, refer to the section on [security scans.]({{}}/rancher/v2.x/en/security/security-scan) - -### Rancher Hardening Guide - -The Rancher Hardening Guide is based off of controls and best practices found in the CIS Kubernetes Benchmark from the Center for Internet Security. - -The hardening guide provides prescriptive guidance for hardening a production installation of Rancher v2.1.x, v2.2.x and v2.3.x. See Rancher's [Self Assessment of the CIS Kubernetes Benchmark](#cis-benchmark-rancher-self-assessment) for the full list of security controls. - -> The hardening guides describe how to secure the nodes in your cluster, and it is recommended to follow a hardening guide before installing Kubernetes. 
- -Each version of the hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -[Hardening Guide v2.4]({{}}/rancher/v2.x/en/security/hardening-2.4/) | Rancher v2.4 | Benchmark v1.5 | Kubernetes v1.15 -[Hardening Guide v2.3.5]({{}}/rancher/v2.x/en/security/hardening-2.3.5/) | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes v1.15 -[Hardening Guide v2.3.3]({{}}/rancher/v2.x/en/security/hardening-2.3.3/) | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes v1.14, v1.15, and v1.16 -[Hardening Guide v2.3]({{}}/rancher/v2.x/en/security/hardening-2.3/) | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes v1.15 -[Hardening Guide v2.2]({{}}/rancher/v2.x/en/security/hardening-2.2/) | Rancher v2.2.x | Benchmark v1.4.1 and 1.4.0 | Kubernetes v1.13 -[Hardening Guide v2.1]({{}}/rancher/v2.x/en/security/hardening-2.1/) | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes v1.11 - -### The CIS Benchmark and Self-Assessment - -The benchmark self-assessment is a companion to the Rancher security hardening guide. While the hardening guide shows you how to harden the cluster, the benchmark guide is meant to help you evaluate the level of security of the hardened cluster. - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. The original benchmark documents can be downloaded from the [CIS website](https://www.cisecurity.org/benchmark/kubernetes/). 
- -Each version of Rancher's self assessment guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -[Self Assessment Guide v2.4]({{}}/rancher/v2.x/en/security/benchmark-2.4/#cis-kubernetes-benchmark-1-5-0-rancher-2-4-+-with-kubernetes-1-15) | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 -[Self Assessment Guide v2.3.5]({{}}/rancher/v2.x/en/security/benchmark-2.3.5/#cis-kubernetes-benchmark-1-5-0-rancher-2-3-5-+-with-kubernetes-1-15) | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 -[Self Assessment Guide v2.3.3]({{}}/rancher/v2.x/en/security/benchmark-2.3.3/#cis-kubernetes-benchmark-1-4-1-rancher-2-3-3-+-with-kubernetes-1-16) | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 -[Self Assessment Guide v2.3]({{}}/rancher/v2.x/en/security/benchmark-2.3/#cis-kubernetes-benchmark-1-4-1-rancher-2-3-0-2-3-2-with-kubernetes-1-15) | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes v1.15 | Benchmark v1.4.1 -[Self Assessment Guide v2.2]({{}}/rancher/v2.x/en/security/benchmark-2.2/) | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes v1.13 | Benchmark v1.4.0 and v1.4.1 -[Self Assessment Guide v2.1]({{}}/rancher/v2.x/en/security/benchmark-2.1/) | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes v1.11 | Benchmark 1.3.0 - -### Third-party Penetration Test Reports - -Rancher periodically hires third parties to perform security audits and penetration tests of the Rancher 2.x software stack. The environments under test follow the Rancher provided hardening guides at the time of the testing. Results are posted when the third party has also verified fixes classified MEDIUM or above. 
- -Results: - -- [Cure53 Pen Test - 7/2019](https://releases.rancher.com/documents/security/pen-tests/2019/RAN-01-cure53-report.final.pdf) -- [Untamed Theory Pen Test- 3/2019](https://releases.rancher.com/documents/security/pen-tests/2019/UntamedTheory-Rancher_SecurityAssessment-20190712_v5.pdf) - -### Rancher CVEs and Resolutions - -Rancher is committed to informing the community of security issues in our products. For the list of CVEs (Common Vulnerabilities and Exposures) for issues we have resolved, refer to [this page.](./cve) - -### Security Tips and Best Practices - -Our [best practices guide]({{}}/rancher/v2.x/en/best-practices/management/#tips-for-security) includes basic tips for increasing security in Rancher. diff --git a/content/rancher/v2.5/en/security/cve/_index.md b/content/rancher/v2.5/en/security/cve/_index.md deleted file mode 100644 index 02c00946dc9..00000000000 --- a/content/rancher/v2.5/en/security/cve/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Rancher CVEs and Resolutions -weight: 300 ---- - -Rancher is committed to informing the community of security issues in our products. Rancher will publish CVEs (Common Vulnerabilities and Exposures) for issues we have resolved. - -| ID | Description | Date | Resolution | -|----|-------------|------|------------| -| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions]({{}}/rancher/v2.x/en/upgrades/rollbacks/). 
| -| [CVE-2019-6287](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6287) | Project members continue to get access to namespaces from projects that they were removed from if they were added to more than one project. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) | -| [CVE-2019-11202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11202) | The default admin, that is shipped with Rancher, will be re-created upon restart of Rancher despite being explicitly deleted. | 16 Apr 2019 | [Rancher v2.2.2](https://github.com/rancher/rancher/releases/tag/v2.2.2), [Rancher v2.1.9](https://github.com/rancher/rancher/releases/tag/v2.1.9) and [Rancher v2.0.14](https://github.com/rancher/rancher/releases/tag/v2.0.14) | -| [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | -| [CVE-2019-12303](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12303) | Project owners can inject extra fluentd logging configurations that makes it possible to read files or execute arbitrary commands inside the fluentd container. Reported by Tyler Welton from Untamed Theory. 
| 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | -| [CVE-2019-13209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13209) | The vulnerability is known as a [Cross-Site Websocket Hijacking attack](https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html). This attack allows an exploiter to gain access to clusters managed by Rancher with the roles/permissions of a victim. It requires that a victim to be logged into a Rancher server and then access a third-party site hosted by the exploiter. Once that is accomplished, the exploiter is able to execute commands against the Kubernetes API with the permissions and identity of the victim. Reported by Matt Belisle and Alex Stevenson from Workiva. | 15 Jul 2019 | [Rancher v2.2.5](https://github.com/rancher/rancher/releases/tag/v2.2.5), [Rancher v2.1.11](https://github.com/rancher/rancher/releases/tag/v2.1.11) and [Rancher v2.0.16](https://github.com/rancher/rancher/releases/tag/v2.0.16) | -| [CVE-2019-14436](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14436) | The vulnerability allows a member of a project that has access to edit role bindings to be able to assign themselves or others a cluster level role granting them administrator access to that cluster. The issue was found and reported by Michal Lipinski at Nokia. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | -| [CVE-2019-14435](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14435) | This vulnerability allows authenticated users to potentially extract otherwise private data out of IPs reachable from system service containers used by Rancher. 
This can include but not only limited to services such as cloud provider metadata services. Although Rancher allow users to configure whitelisted domains for system service access, this flaw can still be exploited by a carefully crafted HTTP request. The issue was found and reported by Matt Belisle and Alex Stevenson at Workiva. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | diff --git a/content/rancher/v2.5/en/security/security-scan/_index.md b/content/rancher/v2.5/en/security/security-scan/_index.md deleted file mode 100644 index 4d9dc4f8831..00000000000 --- a/content/rancher/v2.5/en/security/security-scan/_index.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -title: Security Scans -weight: 1 ---- - -Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. - -The Center for Internet Security (CIS) is a 501(c)(3) nonprofit organization, formed in October 2000, with a mission is to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace". The organization is headquartered in East Greenbush, New York, with members including large corporations, government agencies, and academic institutions. - -CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. - -The Benchmark provides recommendations of two types: Scored and Not Scored. We run tests related to only Scored recommendations. 
 - -- [About the CIS Benchmark](#about-the-cis-benchmark) -- [About the generated report](#about-the-generated-report) -- [Test profiles](#test-profiles) -- [Skipped and not applicable tests](#skipped-and-not-applicable-tests) - - [CIS Benchmark v1.4 skipped tests](#cis-benchmark-v1-4-skipped-tests) - - [CIS Benchmark v1.4 not applicable tests](#cis-benchmark-v1-4-not-applicable-tests) -- [Prerequisites](#prerequisites) -- [Running a scan](#running-a-scan) -- [Scheduling recurring scans](#scheduling-recurring-scans) -- [Skipping tests](#skipping-tests) -- [Setting alerts](#setting-alerts) -- [Deleting a report](#deleting-a-report) -- [Downloading a report](#downloading-a-report) - -# About the CIS Benchmark - -The Center for Internet Security is a 501(c)(3) nonprofit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace". The organization is headquartered in East Greenbush, New York, with members including large corporations, government agencies, and academic institutions. - -CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. - -The official Benchmark documents are available through the CIS website. The sign-up form to access the documents is [here.](https://learn.cisecurity.org/benchmarks) - -To check clusters for CIS Kubernetes Benchmark compliance, the security scan leverages [kube-bench,](https://github.com/aquasecurity/kube-bench) an open-source tool from Aqua Security. - -# About the Generated Report - -Each scan generates a report that can be viewed in the Rancher UI and can be downloaded in CSV format. - -As of Rancher v2.4, the scan will use the CIS Benchmark v1.4. 
The Benchmark version is included in the generated report. - -The Benchmark provides recommendations of two types: Scored and Not Scored. Recommendations marked as Not Scored in the Benchmark are not included in the generated report. - -Some tests are designated as "Not Applicable." These tests will not be run on any CIS scan because of the way that Rancher provisions RKE clusters. For information on how test results can be audited, and why some tests are designated to be not applicable, refer to Rancher's [self-assessment guide for the corresponding Kubernetes version.]({{}}/rancher/v2.x/en/security/#the-cis-benchmark-and-self-assessment) - -The report contains the following information: - -| Column in Report | Description | -|------------------|-------------| -| ID | The ID number of the CIS Benchmark. | -| Description | The description of the CIS Benchmark test. | -| Remediation | What needs to be fixed in order to pass the test. | -| State of Test | Indicates if the test passed, failed, was skipped, or was not applicable. | -| Node type | The node role, which affects which tests are run on the node. Master tests are run on controlplane nodes, etcd tests are run on etcd nodes, and node tests are run on the worker nodes. | -| Nodes | The name(s) of the node that the test was run on. | -| Passed_Nodes | The name(s) of the nodes that the test passed on. | -| Failed_Nodes | The name(s) of the nodes that the test failed on. | - -Refer to [the table in the cluster hardening guide]({{}}/rancher/v2.x/en/security/#rancher-hardening-guide) for information on which versions of Kubernetes, the Benchmark, Rancher, and our cluster hardening guide correspond to each other. Also refer to the hardening guide for configuration files of CIS-compliant clusters and information on remediating failed tests. - -# Test Profiles - -For every CIS benchmark version, Rancher ships with two types of profiles. These profiles are named based on the type of cluster (e.g. 
`RKE`), the CIS benchmark version (e.g. CIS 1.4) and the profile type (e.g. `Permissive` or `Hardened`). For example, a full profile name would be `RKE-CIS-1.4-Permissive` - -All profiles will have a set of not applicable tests that will be skipped during the CIS scan. These tests are not applicable based on how a RKE cluster manages Kubernetes. - -There are 2 types of profiles: - -- **Permissive:** This profile has a set of tests that will be skipped as these tests will fail on a default RKE Kubernetes cluster. Besides the list of skipped tests, the profile will also not run the not applicable tests. -- **Hardened:** This profile will not skip any tests, except for the non-applicable tests. - -In order to pass the "Hardened" profile, you will need to follow the steps on the [hardening guide]({{}}/rancher/v2.x/en/security/#rancher-hardening-guide) and use the `cluster.yml` defined in the hardening guide to provision a hardened cluster. - -# Skipped and Not Applicable Tests - -### CIS Benchmark v1.4 Skipped Tests - -Number | Description | Reason for Skipping ----|---|--- -1.1.11 | "Ensure that the admission control plugin AlwaysPullImages is set (Scored)" | Enabling AlwaysPullImages can use significant bandwidth. -1.1.21 | "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)" | When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. -1.1.24 | "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)" | Enabling Pod Security Policy can cause applications to unexpectedly fail. -1.1.34 | "Ensure that the --encryption-provider-config argument is set as appropriate (Scored)" | Enabling encryption changes how data can be recovered as data is encrypted. -1.1.35 | "Ensure that the encryption provider is set to aescbc (Scored)" | Enabling encryption changes how data can be recovered as data is encrypted. 
 -1.1.36 | "Ensure that the admission control plugin EventRateLimit is set (Scored)" | EventRateLimit needs to be tuned depending on the cluster. -1.2.2 | "Ensure that the --address argument is set to 127.0.0.1 (Scored)" | Adding this argument prevents Rancher's monitoring tool from collecting metrics on the scheduler. -1.3.7 | "Ensure that the --address argument is set to 127.0.0.1 (Scored)" | Adding this argument prevents Rancher's monitoring tool from collecting metrics on the controller manager. -1.4.12 | "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)" | A system service account is required for etcd data directory ownership. Refer to Rancher's hardening guide for more details on how to configure this ownership. -1.7.2 | "Do not admit containers wishing to share the host process ID namespace (Scored)" | Enabling Pod Security Policy can cause applications to unexpectedly fail. -1.7.3 | "Do not admit containers wishing to share the host IPC namespace (Scored)" | Enabling Pod Security Policy can cause applications to unexpectedly fail. -1.7.4 | "Do not admit containers wishing to share the host network namespace (Scored)" | Enabling Pod Security Policy can cause applications to unexpectedly fail. -1.7.5 | " Do not admit containers with allowPrivilegeEscalation (Scored)" | Enabling Pod Security Policy can cause applications to unexpectedly fail. -2.1.6 | "Ensure that the --protect-kernel-defaults argument is set to true (Scored)" | System level configurations are required prior to provisioning the cluster in order for this argument to be set to true. -2.1.10 | "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)" | When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. 
- -### CIS Benchmark v1.4 Not Applicable Tests - -Number | Description | Reason for being not applicable ----|---|--- -1.1.9 | "Ensure that the --repair-malformed-updates argument is set to false (Scored)" | The argument --repair-malformed-updates has been removed as of Kubernetes version 1.14 -1.3.6 | "Ensure that the RotateKubeletServerCertificate argument is set to true" | Cluster provisioned by RKE handles certificate rotation directly through RKE. -1.4.1 | "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. -1.4.2 | "Ensure that the API server pod specification file ownership is set to root:root (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. -1.4.3 | "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -1.4.4 | "Ensure that the controller manager pod specification file ownership is set to root:root (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -1.4.5 | "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -1.4.6 | "Ensure that the scheduler pod specification file ownership is set to root:root (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -1.4.7 | "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. 
-1.4.8 | "Ensure that the etcd pod specification file ownership is set to root:root (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. -1.4.13 | "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. -1.4.14 | "Ensure that the admin.conf file ownership is set to root:root (Scored)" | Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. -2.1.8 | "Ensure that the --hostname-override argument is not set (Scored)" | Clusters provisioned by RKE clusters and most cloud providers require hostnames. -2.1.12 | "Ensure that the --rotate-certificates argument is not set to false (Scored)" | Cluster provisioned by RKE handles certificate rotation directly through RKE. -2.1.13 | "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" | Cluster provisioned by RKE handles certificate rotation directly through RKE. -2.2.3 | "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. -2.2.4 | "Ensure that the kubelet service file ownership is set to root:root (Scored)" | Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. -2.2.9 | "Ensure that the kubelet configuration file ownership is set to root:root (Scored)" | RKE doesn’t require or maintain a configuration file for the kubelet. -2.2.10 | "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)" | RKE doesn’t require or maintain a configuration file for the kubelet. 
- - -# Prerequisites - -To run security scans on a cluster and access the generated reports, you must be an [Administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [Cluster Owner.]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) - -Rancher can only run security scans on clusters that were created with RKE, which includes custom clusters and clusters that Rancher created in an infrastructure provider such as Amazon EC2 or GCE. Imported clusters and clusters in hosted Kubernetes providers can't be scanned by Rancher. - -The security scan cannot run in a cluster that has Windows nodes. - -You will only be able to see the CIS scan reports for clusters that you have access to. - -# Running a Scan - -1. From the cluster view in Rancher, click **Tools > CIS Scans.** -1. Click **Run Scan.** -1. Choose a CIS scan profile. - -**Result:** A report is generated and displayed in the **CIS Scans** page. To see details of the report, click the report's name. - -# Scheduling Recurring Scans - -Recurring scans can be scheduled to run on any RKE Kubernetes cluster. - -To enable recurring scans, edit the advanced options in the cluster configuration during cluster creation or after the cluster has been created. - -To schedule scans for an existing cluster: - -1. Go to the cluster view in Rancher. -1. Click **Tools > CIS Scans.** -1. Click **Add Schedule.** This takes you to the section of the cluster editing page that is applicable to configuring a schedule for CIS scans. (This section can also be reached by going to the cluster view, clicking **⋮ > Edit,** and going to the **Advanced Options.**) -1. In the **CIS Scan Enabled** field, click **Yes.** -1. In the **CIS Scan Profile** field, choose a **Permissive** or **Hardened** profile. The corresponding CIS Benchmark version is included in the profile name. 
Note: Any skipped tests [defined in a separate ConfigMap](#skipping-tests) will be skipped regardless of whether a **Permissive** or **Hardened** profile is selected. When selecting the the permissive profile, you should see which tests were skipped by Rancher (tests that are skipped by default for RKE clusters) and which tests were skipped by a Rancher user. In the hardened test profile, the only skipped tests will be skipped by users. -1. In the **CIS Scan Interval (cron)** job, enter a [cron expression](https://en.wikipedia.org/wiki/Cron#CRON_expression) to define how often the cluster will be scanned. -1. In the **CIS Scan Report Retention** field, enter the number of past reports that should be kept. - -**Result:** The security scan will run and generate reports at the scheduled intervals. - -The test schedule can be configured in the `cluster.yml`: - -```yaml -scheduled_cluster_scan: -    enabled: true -    scan_config: -        cis_scan_config: -            override_benchmark_version: rke-cis-1.4 -            profile: permissive -    schedule_config: -        cron_schedule: 0 0 * * * -        retention: 24 -``` - - -# Skipping Tests - -You can define a set of tests that will be skipped by the CIS scan when the next report is generated. - -These tests will be skipped for subsequent CIS scans, including both manually triggered and scheduled scans, and the tests will be skipped with any profile. - -The skipped tests will be listed alongside the test profile name in the cluster configuration options when a test profile is selected for a recurring cluster scan. The skipped tests will also be shown every time a scan is triggered manually from the Rancher UI by clicking **Run Scan.** The display of skipped tests allows you to know ahead of time which tests will be run in each scan. - -To skip tests, you will need to define them in a Kubernetes ConfigMap resource. 
Each skipped CIS scan test is listed in the ConfigMap alongside the version of the CIS benchmark that the test belongs to. - -To skip tests by editing a ConfigMap resource, - -1. Create a `security-scan` namespace. -1. Create a ConfigMap named `security-scan-cfg`. -1. Enter the skip information under the key `config.json` in the following format: - - ```json - { - "skip": { - "rke-cis-1.4": [ - "1.1.1", - "1.2.2" - ] - } - } - ``` - - In the example above, the CIS benchmark version is specified alongside the tests to be skipped for that version. - -**Result:** These tests will be skipped on subsequent scans that use the defined CIS Benchmark version. - -# Setting Alerts - -Rancher provides a set of alerts for cluster scans. which are not configured to have notifiers by default: - -- A manual cluster scan was completed -- A manual cluster scan has failures -- A scheduled cluster scan was completed -- A scheduled cluster scan has failures - -> **Prerequisite:** You need to configure a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) before configuring, sending, or receiving alerts. - -To activate an existing alert for a CIS scan result, - -1. From the cluster view in Rancher, click **Tools > Alerts.** -1. Go to the section called **A set of alerts for cluster scans.** -1. Go to the alert you want to activate and click **⋮ > Activate.** -1. Go to the alert rule group **A set of alerts for cluster scans** and click **⋮ > Edit.** -1. Scroll down to the **Alert** section. In the **To** field, select the notifier that you would like to use for sending alert notifications. -1. Optional: To limit the frequency of the notifications, click on **Show advanced options** and configure the time interval of the alerts. -1. Click **Save.** - -**Result:** The notifications will be triggered when the a scan is run on a cluster and the active alerts have satisfied conditions. - -To create a new alert, - -1. Go to the cluster view and click **Tools > CIS Scans.** -1. 
Click **Add Alert.** -1. Fill out the form. -1. Enter a name for the alert. -1. In the **Is** field, set the alert to be triggered when a scan is completed or when a scan has a failure. -1. In the **Send a** field, set the alert as a **Critical,** **Warning,** or **Info** alert level. -1. Choose a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) for the alert. - -**Result:** The alert is created and activated. The notifications will be triggered when the a scan is run on a cluster and the active alerts have satisfied conditions. - -For more information about alerts, refer to [this page.]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) - -# Deleting a Report - -1. From the cluster view in Rancher, click **Tools > CIS Scans.** -1. Go to the report that should be deleted. -1. Click the **⋮ > Delete.** -1. Click **Delete.** - -# Downloading a Report - -1. From the cluster view in Rancher, click **Tools > CIS Scans.** -1. Go to the report that you want to download. Click **⋮ > Download.** - -**Result:** The report is downloaded in CSV format. For more information on each columns, refer to the [section about the generated report.](#about-the-generated-report) diff --git a/content/rancher/v2.5/en/system-tools/_index.md b/content/rancher/v2.5/en/system-tools/_index.md deleted file mode 100644 index 9707d6521e2..00000000000 --- a/content/rancher/v2.5/en/system-tools/_index.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: System Tools -weight: 17 ---- - -System Tools is a tool to perform operational tasks on [Rancher launched RKE Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters or [installations of Rancher on an RKE cluster.]({{}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/) The tasks include: - -* Collect logging and system metrics from nodes. -* Remove Kubernetes resources created by Rancher. 
- -The following commands are available: - -| Command | Description -|---|--- -| [logs](#logs) | Collect Kubernetes cluster component logs from nodes. -| [stats](#stats) | Stream system metrics from nodes. -| [remove](#remove) | Remove Kubernetes resources created by Rancher. - -# Download System Tools - -You can download the latest version of System Tools from the [GitHub releases page](https://github.com/rancher/system-tools/releases/latest). Download the version of `system-tools` for the OS that you are using to interact with the cluster. - -Operating System | Filename ------------------|----- -MacOS | `system-tools_darwin-amd64` -Linux | `system-tools_linux-amd64` -Windows | `system-tools_windows-amd64.exe` - -After you download the tools, complete the following actions: - -1. Rename the file to `system-tools`. - -1. Give the file executable permissions by running the following command: - - > **Using Windows?** - The file is already an executable, you can skip this step. - - ``` - chmod +x system-tools - ``` - -# Logs - -The logs subcommand will collect log files of core Kubernetes cluster components from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) or nodes on an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/). See [Troubleshooting]({{}}//rancher/v2.x/en/troubleshooting/) for a list of core Kubernetes cluster components. - -System Tools will use the provided kubeconfig file to deploy a DaemonSet, that will copy all the logfiles from the core Kubernetes cluster components and add them to a single tar file (`cluster-logs.tar` by default). If you only want to collect logging from a single node, you can specify the node by using `--node NODENAME` or `-n NODENAME`. 
- -### Usage - -``` -./system-tools_darwin-amd64 logs --kubeconfig -``` - -The following are the options for the logs command: - -| Option | Description -| ------------------------------------------------------ | ------------------------------------------------------ -| `--kubeconfig , -c ` | The cluster's kubeconfig file. -| `--output , -o cluster-logs.tar` | Name of the created tarball containing the logs. If no output filename is defined, the options defaults to `cluster-logs.tar`. -| `--node , -n node1` | Specify the nodes to collect the logs from. If no node is specified, logs from all nodes in the cluster will be collected. - -# Stats - -The stats subcommand will display system metrics from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) or nodes in an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/). - -System Tools will deploy a DaemonSet, and run a predefined command based on `sar` (System Activity Report) to show system metrics. - -### Usage - -``` -./system-tools_darwin-amd64 stats --kubeconfig -``` - -The following are the options for the stats command: - -| Option | Description -| ------------------------------------------------------ | ------------------------------ -| `--kubeconfig , -c ` | The cluster's kubeconfig file. -| `--node , -n node1` | Specify the nodes to display the system metrics from. If no node is specified, logs from all nodes in the cluster will be displayed. -| `--stats-command value, -s value` | The command to run to display the system metrics. If no command is defined, the options defaults to `/usr/bin/sar -u -r -F 1 1`. - -# Remove - ->**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{}}/rancher/v2.x/en/backups/backups) before executing the command. 
- -When you install Rancher on a Kubernetes cluster, it will create Kubernetes resources to run and to store configuration data. If you want to remove Rancher from your cluster, you can use the `remove` subcommand to remove the Kubernetes resources. When you use the `remove` subcommand, the following resources will be removed: - -- The Rancher deployment namespace (`cattle-system` by default). -- Any `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` that Rancher applied the `cattle.io/creator:norman` label to. Rancher applies this label to any resource that it creates as of v2.1.0. -- Labels, annotations, and finalizers. -- Rancher Deployment. -- Machines, clusters, projects, and user custom resource deployments (CRDs). -- All resources create under the `management.cattle.io` API Group. -- All CRDs created by Rancher v2.x. - ->**Using 2.0.8 or Earlier?** -> ->These versions of Rancher do not automatically delete the `serviceAccount`, `clusterRole`, and `clusterRoleBindings` resources after the job runs. You'll have to delete them yourself. - -### Usage - -When you run the command below, all the resources listed [above](#remove) will be removed from the cluster. - ->**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{}}/rancher/v2.x/en/backups/backups) before executing the command. - -``` -./system-tools remove --kubeconfig --namespace -``` - -The following are the options for the `remove` command: - -| Option | Description -| ---------------------------------------------- | ------------ -| `--kubeconfig , -c ` | The cluster's kubeconfig file -| `--namespace , -n cattle-system` | Rancher 2.x deployment namespace (``). If no namespace is defined, the options defaults to `cattle-system`. -| `--force` | Skips the interactive removal confirmation and removes the Rancher deployment without prompt. 
diff --git a/content/rancher/v2.5/en/troubleshooting/_index.md b/content/rancher/v2.5/en/troubleshooting/_index.md deleted file mode 100644 index 2e21c579727..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Troubleshooting -weight: 21 ---- - -This section contains information to help you troubleshoot issues when using Rancher. - -- [Kubernetes components]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/) - - If you need help troubleshooting core Kubernetes cluster components like: - * `etcd` - * `kube-apiserver` - * `kube-controller-manager` - * `kube-scheduler` - * `kubelet` - * `kube-proxy` - * `nginx-proxy` - -- [Kubernetes resources]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/) - - Options for troubleshooting Kubernetes resources like Nodes, Ingress Controller and Rancher Agents are described in this section. - -- [Networking]({{}}/rancher/v2.x/en/troubleshooting/networking/) - - Steps to troubleshoot networking issues can be found here. - -- [DNS]({{}}/rancher/v2.x/en/troubleshooting/dns/) - - When you experience name resolution issues in your cluster. - -- [Troubleshooting Rancher installed on Kubernetes]({{}}/rancher/v2.x/en/troubleshooting/rancherha/) - - If you experience issues with your [Rancher server installed on Kubernetes]({{}}/rancher/v2.x/en/installation/k8s-install/) - -- [Imported clusters]({{}}/rancher/v2.x/en/troubleshooting/imported-clusters/) - - If you experience issues when [Importing Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) - -- [Logging]({{}}/rancher/v2.x/en/troubleshooting/logging/) - - Read more about what log levels can be configured and how to configure a log level. 
- diff --git a/content/rancher/v2.5/en/troubleshooting/docker/_index.md b/content/rancher/v2.5/en/troubleshooting/docker/_index.md deleted file mode 100644 index ef7f0daf144..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/docker/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Troubleshooting Rancher Installed on a Single Node with Docker -weight: 3 ---- - -{{< ssl_faq_single >}} diff --git a/content/rancher/v2.5/en/troubleshooting/mcm/_index.md b/content/rancher/v2.5/en/troubleshooting/mcm/_index.md deleted file mode 100644 index 317c7a34f40..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/mcm/_index.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -title: Troubleshooting the Enterprise Cluster Manager -weight: 2 ---- - -> This page is under construction. - -This section describes how to troubleshoot an installation of Rancher on a Kubernetes cluster. - -### Relevant Namespaces - -Most of the troubleshooting will be done on objects in these 3 namespaces. - -- `cattle-system` - `rancher` deployment and pods. -- `ingress-nginx` - Ingress controller pods and services. -- `kube-system` - `tiller` and `cert-manager` pods. - -### "default backend - 404" - -A number of things can cause the ingress-controller not to forward traffic to your rancher instance. Most of the time it's due to a bad SSL configuration. - -Things to check - -- [Is Rancher Running](#is-rancher-running) -- [Cert CN is "Kubernetes Ingress Controller Fake Certificate"](#cert-cn-is-kubernetes-ingress-controller-fake-certificate) - -### Check if Rancher is Running - -Use `kubectl` to check the `cattle-system` system namespace and see if the Rancher pods are in a Running state. - -``` -kubectl -n cattle-system get pods - -NAME READY STATUS RESTARTS AGE -pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m -``` - -If the state is not `Running`, run a `describe` on the pod and check the Events. - -``` -kubectl -n cattle-system describe pod - -... 
-Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 11m default-scheduler Successfully assigned rancher-784d94f59b-vgqzh to localhost - Normal SuccessfulMountVolume 11m kubelet, localhost MountVolume.SetUp succeeded for volume "rancher-token-dj4mt" - Normal Pulling 11m kubelet, localhost pulling image "rancher/rancher:v2.0.4" - Normal Pulled 11m kubelet, localhost Successfully pulled image "rancher/rancher:v2.0.4" - Normal Created 11m kubelet, localhost Created container - Normal Started 11m kubelet, localhost Started container -``` - -### Check the Rancher Logs - -Use `kubectl` to list the pods. - -``` -kubectl -n cattle-system get pods - -NAME READY STATUS RESTARTS AGE -pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m -``` - -Use `kubectl` and the pod name to list the logs from the pod. - -``` -kubectl -n cattle-system logs -f rancher-784d94f59b-vgqzh -``` - -### Cert CN is "Kubernetes Ingress Controller Fake Certificate" - -Use your browser to check the certificate details. If it says the Common Name is "Kubernetes Ingress Controller Fake Certificate", something may have gone wrong with reading or issuing your SSL cert. - -> **Note:** if you are using LetsEncrypt to issue certs it can sometimes take a few minuets to issue the cert. - -### Checking for issues with cert-manager issued certs (Rancher Generated or LetsEncrypt) - -`cert-manager` has 3 parts. - -- `cert-manager` pod in the `kube-system` namespace. -- `Issuer` object in the `cattle-system` namespace. -- `Certificate` object in the `cattle-system` namespace. - -Work backwards and do a `kubectl describe` on each object and check the events. You can track down what might be missing. - -For example there is a problem with the Issuer: - -``` -kubectl -n cattle-system describe certificate -... 
-Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Warning IssuerNotReady 18s (x23 over 19m) cert-manager Issuer rancher not ready -``` - -``` -kubectl -n cattle-system describe issuer -... -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Warning ErrInitIssuer 19m (x12 over 19m) cert-manager Error initializing issuer: secret "tls-rancher" not found - Warning ErrGetKeyPair 9m (x16 over 19m) cert-manager Error getting keypair for CA issuer: secret "tls-rancher" not found -``` - -### Checking for Issues with Your Own SSL Certs - -Your certs get applied directly to the Ingress object in the `cattle-system` namespace. - -Check the status of the Ingress object and see if its ready. - -``` -kubectl -n cattle-system describe ingress -``` - -If its ready and the SSL is still not working you may have a malformed cert or secret. - -Check the nginx-ingress-controller logs. Because the nginx-ingress-controller has multiple containers in its pod you will need to specify the name of the container. - -``` -kubectl -n ingress-nginx logs -f nginx-ingress-controller-rfjrq nginx-ingress-controller -... 
-W0705 23:04:58.240571 7 backend_ssl.go:49] error obtaining PEM from secret cattle-system/tls-rancher-ingress: error retrieving secret cattle-system/tls-rancher-ingress: secret cattle-system/tls-rancher-ingress was not found -``` - -### No matches for kind "Issuer" - -The [SSL configuration]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/#choose-your-ssl-configuration) option you have chosen requires [cert-manager]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/#optional-install-cert-manager) to be installed before installing Rancher or else the following error is shown: - -``` -Error: validation failed: unable to recognize "": no matches for kind "Issuer" in version "certmanager.k8s.io/v1alpha1" -``` - -Install [cert-manager]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/#optional-install-cert-manager) and try installing Rancher again. - - -### Canal Pods show READY 2/3 - -The most common cause of this issue is port 8472/UDP is not open between the nodes. Check your local firewall, network routing or security groups. - -Once the network issue is resolved, the `canal` pods should timeout and restart to establish their connections. - -### nginx-ingress-controller Pods show RESTARTS - -The most common cause of this issue is the `canal` pods have failed to establish the overlay network. See [canal Pods show READY `2/3`](#canal-pods-show-ready-2-3) for troubleshooting. - - -### Failed to dial to /var/run/docker.sock: ssh: rejected: administratively prohibited (open failed) - -Some causes of this error include: - -* User specified to connect with does not have permission to access the Docker socket. 
This can be checked by logging into the host and running the command `docker ps`: - -``` -$ ssh user@server -user@server$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -``` - -See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) how to set this up properly. - -* When using RedHat/CentOS as operating system, you cannot use the user `root` to connect to the nodes because of [Bugzilla #1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565). You will need to add a separate user and configure it to access the Docker socket. See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) how to set this up properly. - -* SSH server version is not version 6.7 or higher. This is needed for socket forwarding to work, which is used to connect to the Docker socket over SSH. This can be checked using `sshd -V` on the host you are connecting to, or using netcat: -``` -$ nc xxx.xxx.xxx.xxx 22 -SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2.10 -``` - -### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: no key found - -The key file specified as `ssh_key_path` cannot be accessed. Make sure that you specified the private key file (not the public key, `.pub`), and that the user that is running the `rke` command can access the private key file. - -### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain - -The key file specified as `ssh_key_path` is not correct for accessing the node. Double-check if you specified the correct `ssh_key_path` for the node and if you specified the correct user to connect with. 
- -### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys - -If you want to use encrypted private keys, you should use `ssh-agent` to load your keys with your passphrase. If the `SSH_AUTH_SOCK` environment variable is found in the environment where the `rke` command is run, it will be used automatically to connect to the node. - -### Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running? - -The node is not reachable on the configured `address` and `port`. diff --git a/content/rancher/v2.5/en/troubleshooting/mcm/dns/_index.md b/content/rancher/v2.5/en/troubleshooting/mcm/dns/_index.md deleted file mode 100644 index ecbe88a7588..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/mcm/dns/_index.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: DNS -weight: 103 ---- - -The commands/steps listed on this page can be used to check name resolution issues in your cluster. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. - -Before running the DNS checks, check the [default DNS provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly]({{}}/rancher/v2.x/en/troubleshooting/networking/#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. 
- -### Check if DNS pods are running - -``` -kubectl -n kube-system get pods -l k8s-app=kube-dns -``` - -Example output when using CoreDNS: -``` -NAME READY STATUS RESTARTS AGE -coredns-799dffd9c4-6jhlz 1/1 Running 0 76m -``` - -Example output when using kube-dns: -``` -NAME READY STATUS RESTARTS AGE -kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s -``` - -### Check if the DNS service is present with the correct cluster-ip - -``` -kubectl -n kube-system get svc -l k8s-app=kube-dns -``` - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s -``` - -### Check if domain names are resolving - -Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. - -``` -kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup kubernetes.default -``` - -Example output: -``` -Server: 10.43.0.10 -Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local - -Name: kubernetes.default -Address 1: 10.43.0.1 kubernetes.default.svc.cluster.local -pod "busybox" deleted -``` - -Check if external names are resolving (in this example, `www.google.com`) - -``` -kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup www.google.com -``` - -Example output: -``` -Server: 10.43.0.10 -Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local - -Name: www.google.com -Address 1: 2a00:1450:4009:80b::2004 lhr35s04-in-x04.1e100.net -Address 2: 216.58.211.100 ams15s32-in-f4.1e100.net -pod "busybox" deleted -``` - -If you want to check resolving of domain names on all of the hosts, execute the following steps: - -1. 
Save the following file as `ds-dnstest.yml` - - ``` - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: dnstest - spec: - selector: - matchLabels: - name: dnstest - template: - metadata: - labels: - name: dnstest - spec: - tolerations: - - operator: Exists - containers: - - image: busybox:1.28 - imagePullPolicy: Always - name: alpine - command: ["sh", "-c", "tail -f /dev/null"] - terminationMessagePath: /dev/termination-log - ``` - -2. Launch it using `kubectl create -f ds-dnstest.yml` -3. Wait until `kubectl rollout status ds/dnstest -w` returns: `daemon set "dnstest" successfully rolled out`. -4. Configure the environment variable `DOMAIN` to a fully qualified domain name (FQDN) that the host should be able to resolve (`www.google.com` is used as an example) and run the following command to let each container on every host resolve the configured domain name (it's a single line command). - - ``` - export DOMAIN=www.google.com; echo "=> Start DNS resolve test"; kubectl get pods -l name=dnstest --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do kubectl exec $pod -- /bin/sh -c "nslookup $DOMAIN > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $host cannot resolve $DOMAIN; fi; done; echo "=> End DNS resolve test" - ``` - -5. When this command has finished running, the output indicating everything is correct is: - - ``` - => Start DNS resolve test - => End DNS resolve test - ``` - -If you see error in the output, that means that the mentioned host(s) is/are not able to resolve the given FQDN. - -Example error output of a situation where host with IP 209.97.182.150 had the UDP ports blocked. - -``` -=> Start DNS resolve test -command terminated with exit code 1 -209.97.182.150 cannot resolve www.google.com -=> End DNS resolve test -``` - -Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. 
- -### CoreDNS specific - -#### Check CoreDNS logging - -``` -kubectl -n kube-system logs -l k8s-app=kube-dns -``` - -#### Check configuration - -CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. - -``` -kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} -``` - -#### Check upstream nameservers in resolv.conf - -By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. - -``` -kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' -``` - -#### Enable query logging - -Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: - -``` -kubectl get configmap -n kube-system coredns -o json | kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log\\n loadbalance_g' | kubectl apply -f - -``` - -All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). - -### kube-dns specific - -#### Check upstream nameservers in kubedns container - -By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. 
Since Rancher v2.0.7, we detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). - -Use the following command to check the upstream nameservers used by the kubedns container: - -``` -kubectl -n kube-system get pods -l k8s-app=kube-dns --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do echo "Pod ${pod} on host ${host}"; kubectl -n kube-system exec $pod -c kubedns cat /etc/resolv.conf; done -``` - -Example output: -``` -Pod kube-dns-667c7cb9dd-z4dsf on host x.x.x.x -nameserver 1.1.1.1 -nameserver 8.8.4.4 -``` - -If the output shows an address in the loopback range (`127.0.0.0/8`), you can correct this in two ways: - -* Make sure the correct nameservers are listed in `/etc/resolv.conf` on your nodes in the cluster, please consult your operating system documentation on how to do this. Make sure you execute this before provisioning a cluster, or reboot the nodes after making the modification. -* Configure the `kubelet` to use a different file for resolving names, by using `extra_args` as shown below (where `/run/resolvconf/resolv.conf` is the file with the correct nameservers): - -``` -services: - kubelet: - extra_args: - resolv-conf: "/run/resolvconf/resolv.conf" -``` - -> **Note:** As the `kubelet` is running inside a container, the path for files located in `/etc` and `/usr` are in `/host/etc` and `/host/usr` inside the `kubelet` container. - -See [Editing Cluster as YAML]({{}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters/#editing-cluster-as-yaml) how to apply this change. 
When the provisioning of the cluster has finished, you have to remove the kube-dns pod to activate the new setting in the pod: - -``` -kubectl delete pods -n kube-system -l k8s-app=kube-dns -pod "kube-dns-5fd74c7488-6pwsf" deleted -``` - -Try to resolve name again using [Check if domain names are resolving](#check-if-domain-names-are-resolving). - -If you want to check the kube-dns configuration in your cluster (for example, to check if there are different upstream nameservers configured), you can run the following command to list the kube-dns configuration: - -``` -kubectl -n kube-system get configmap kube-dns -o go-template='{{range $key, $value := .data}}{{ $key }}{{":"}}{{ $value }}{{"\n"}}{{end}}' -``` - -Example output: -``` -upstreamNameservers:["1.1.1.1"] -``` diff --git a/content/rancher/v2.5/en/troubleshooting/mcm/imported-clusters/_index.md b/content/rancher/v2.5/en/troubleshooting/mcm/imported-clusters/_index.md deleted file mode 100644 index b7a6e6b28eb..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/mcm/imported-clusters/_index.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Imported clusters -weight: 105 ---- - -The commands/steps listed on this page can be used to check clusters that you are importing or that are imported in Rancher. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kubeconfig_from_imported_cluster.yml`) - -### Rancher agents - -Communication to the cluster (Kubernetes API via cattle-cluster-agent) and communication to the nodes is done through Rancher agents. - -If the cattle-cluster-agent cannot connect to the configured `server-url`, the cluster will remain in **Pending** state, showing `Waiting for full cluster configuration`. 
- -#### cattle-node-agent - -Check if the cattle-node-agent pods are present on each node, have status **Running** and don't have a high count of Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-node-agent-4gc2p 1/1 Running 0 2h x.x.x.x worker-1 -cattle-node-agent-8cxkk 1/1 Running 0 2h x.x.x.x etcd-1 -cattle-node-agent-kzrlg 1/1 Running 0 2h x.x.x.x etcd-0 -cattle-node-agent-nclz9 1/1 Running 0 2h x.x.x.x controlplane-0 -cattle-node-agent-pwxp7 1/1 Running 0 2h x.x.x.x worker-0 -cattle-node-agent-t5484 1/1 Running 0 2h x.x.x.x controlplane-1 -cattle-node-agent-t8mtz 1/1 Running 0 2h x.x.x.x etcd-2 -``` - -Check logging of a specific cattle-node-agent pod or all cattle-node-agent pods: - -``` -kubectl -n cattle-system logs -l app=cattle-agent -``` - -#### cattle-cluster-agent - -Check if the cattle-cluster-agent pod is present in the cluster, has status **Running** and doesn't have a high count of Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-cluster-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-cluster-agent-54d7c6c54d-ht9h4 1/1 Running 0 2h x.x.x.x worker-1 -``` - -Check logging of cattle-cluster-agent pod: - -``` -kubectl -n cattle-system logs -l app=cattle-cluster-agent -``` diff --git a/content/rancher/v2.5/en/troubleshooting/mcm/logging/_index.md b/content/rancher/v2.5/en/troubleshooting/mcm/logging/_index.md deleted file mode 100644 index 50024334901..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/mcm/logging/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Logging -weight: 110 ---- - -The following log levels are used in Rancher: - -| Name | Description | -|---------|-------------| -| `info` | Logs informational messages. This is the default log level. | -| `debug` | Logs more detailed messages that can be used to debug. 
| -| `trace` | Logs very detailed messages on internal functions. This is very verbose and can contain sensitive information. | - -### How to configure a log level - -* Kubernetes install - * Configure debug log level -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | awk '{ print $1 }' | while read rancherpod; do kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $rancherpod -- loglevel --set debug; done -OK -OK -OK -$ kubectl --kubeconfig $KUBECONFIG -n cattle-system logs -l app=rancher -``` - - * Configure info log level -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | awk '{ print $1 }' | while read rancherpod; do kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $rancherpod -- loglevel --set info; done -OK -OK -OK -``` - -* Docker Install - * Configure debug log level -``` -$ docker exec -ti <container_id> loglevel --set debug -OK -$ docker logs -f <container_id> -``` - - * Configure info log level -``` -$ docker exec -ti <container_id> loglevel --set info -OK -``` diff --git a/content/rancher/v2.5/en/troubleshooting/mcm/networking/_index.md b/content/rancher/v2.5/en/troubleshooting/mcm/networking/_index.md deleted file mode 100644 index 7259b61a3e0..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/mcm/networking/_index.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Networking -weight: 102 ---- - -The commands/steps listed on this page can be used to check networking related issues in your cluster. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. 
- -### Double check if all the required ports are opened in your (host) firewall - -Double check if all the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP. - -### Check if overlay network is functioning correctly - -The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. - -To test the overlay network, you can launch the following `DaemonSet` definition. This will run a `busybox` container on every host, which we will use to run a `ping` test between containers on all hosts. - -1. Save the following file as `ds-overlaytest.yml` - - ``` - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: overlaytest - spec: - selector: - matchLabels: - name: overlaytest - template: - metadata: - labels: - name: overlaytest - spec: - tolerations: - - operator: Exists - containers: - - image: busybox:1.28 - imagePullPolicy: Always - name: busybox - command: ["sh", "-c", "tail -f /dev/null"] - terminationMessagePath: /dev/termination-log - ``` - -2. Launch it using `kubectl create -f ds-overlaytest.yml` -3. Wait until `kubectl rollout status ds/overlaytest -w` returns: `daemon set "overlaytest" successfully rolled out`. -4. Run the following command, from the same location, to let each container on every host ping each other (it's a single line bash command). 
- - ``` - echo "=> Start network overlay test"; kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | while read spod shost; do kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | while read tip thost; do kubectl --request-timeout='10s' exec $spod -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $shost cannot reach $thost; fi; done; done; echo "=> End network overlay test" - ``` - -5. When this command has finished running, the output indicating everything is correct is: - - ``` - => Start network overlay test - => End network overlay test - ``` - -If you see error in the output, that means that the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for overlay networking are not opened between the hosts indicated. - -Example error output of a situation where NODE1 had the UDP ports blocked. - -``` -=> Start network overlay test -command terminated with exit code 1 -NODE2 cannot reach NODE1 -command terminated with exit code 1 -NODE3 cannot reach NODE1 -command terminated with exit code 1 -NODE1 cannot reach NODE2 -command terminated with exit code 1 -NODE1 cannot reach NODE3 -=> End network overlay test -``` - -Cleanup the busybox DaemonSet by running `kubectl delete ds/overlaytest`. 
- -### Check if MTU is correctly configured on hosts and on peering/tunnel appliances/devices - -When the MTU is incorrectly configured (either on hosts running Rancher, nodes in created/imported clusters or on appliances/devices in between), error messages will be logged in Rancher and in the agents, similar to: - -* `websocket: bad handshake` -* `Failed to connect to proxy` -* `read tcp: i/o timeout` - -See [Google Cloud VPN: MTU Considerations](https://cloud.google.com/vpn/docs/concepts/mtu-considerations#gateway_mtu_vs_system_mtu) for an example of how to configure MTU correctly when using Google Cloud VPN between Rancher and cluster nodes. - -### Resolved issues - -#### Overlay network broken when using Canal/Flannel due to missing node annotations - -| | | -|------------|------------| -| GitHub issue | [#13644](https://github.com/rancher/rancher/issues/13644) | -| Resolved in | v2.1.2 | - -To check if your cluster is affected, the following command will list nodes that are broken (this command requires `jq` to be installed): - -``` -kubectl get nodes -o json | jq '.items[].metadata | select(.annotations["flannel.alpha.coreos.com/public-ip"] == null or .annotations["flannel.alpha.coreos.com/kube-subnet-manager"] == null or .annotations["flannel.alpha.coreos.com/backend-type"] == null or .annotations["flannel.alpha.coreos.com/backend-data"] == null) | .name' -``` - -If there is no output, the cluster is not affected. - -#### System namespace pods network connectivity broken - -> **Note:** This applies only to Rancher upgrades from v2.0.6 or earlier to v2.0.7 or later. Upgrades from v2.0.7 to a later version are unaffected. 
- -| | | -|------------|------------| -| GitHub issue | [#15146](https://github.com/rancher/rancher/issues/15146) | - -If pods in system namespaces cannot communicate with pods in other system namespaces, you will need to follow the instructions in [Upgrading to v2.0.7+ — Namespace Migration]({{}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/) to restore connectivity. Symptoms include: - -- NGINX ingress controller showing `504 Gateway Time-out` when accessed. -- NGINX ingress controller logging `upstream timed out (110: Connection timed out) while connecting to upstream` when accessed. diff --git a/content/rancher/v2.5/en/troubleshooting/rke/_index.md b/content/rancher/v2.5/en/troubleshooting/rke/_index.md deleted file mode 100644 index c048db2fcf2..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/rke/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Troubleshooting an RKE Cluster -weight: 1 ---- - -The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. - -This section includes troubleshooting tips in the following categories: - -- [Troubleshooting etcd Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd) -- [Troubleshooting Controlplane Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane) -- [Troubleshooting nginx-proxy Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy) -- [Troubleshooting Worker Nodes and Generic Components]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic) - -# Kubernetes Component Diagram - -![Cluster diagram]({{}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid \ No newline at end of file diff --git a/content/rancher/v2.5/en/troubleshooting/rke/controlplane/_index.md b/content/rancher/v2.5/en/troubleshooting/rke/controlplane/_index.md deleted file mode 100644 index 1ca42591cf2..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/rke/controlplane/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Troubleshooting Controlplane Nodes -weight: 2 ---- - -This section applies to nodes with the `controlplane` role. - -# Check if the Controlplane Containers are Running - -There are three specific containers launched on nodes with the `controlplane` role: - -* `kube-apiserver` -* `kube-controller-manager` -* `kube-scheduler` - -The containers should have status **Up**. The duration shown after **Up** is the time the container has been running. - -``` -docker ps -a -f=name='kube-apiserver|kube-controller-manager|kube-scheduler' -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -26c7159abbcc rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-apiserver -f3d287ca4549 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-scheduler -bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-controller-manager -``` - -# Controlplane Container Logging - -> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/#kubernetes-leader-election) how to retrieve the current leader. - -The logging of the containers can contain information on what the problem could be. 
- -``` -docker logs kube-apiserver -docker logs kube-controller-manager -docker logs kube-scheduler -``` \ No newline at end of file diff --git a/content/rancher/v2.5/en/troubleshooting/rke/etcd/_index.md b/content/rancher/v2.5/en/troubleshooting/rke/etcd/_index.md deleted file mode 100644 index f83d241a08a..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/rke/etcd/_index.md +++ /dev/null @@ -1,365 +0,0 @@ ---- -title: Troubleshooting etcd Nodes -weight: 1 ---- - -This section contains commands and tips for troubleshooting nodes with the `etcd` role. - -This page covers the following topics: - -- [Checking if the etcd Container is Running](#checking-if-the-etcd-container-is-running) -- [etcd Container Logging](#etcd-container-logging) -- [etcd Cluster and Connectivity Checks](#etcd-cluster-and-connectivity-checks) - - [Check etcd Members on all Nodes](#check-etcd-members-on-all-nodes) - - [Check Endpoint Status](#check-endpoint-status) - - [Check Endpoint Health](#check-endpoint-health) - - [Check Connectivity on Port TCP/2379](#check-connectivity-on-port-tcp-2379) - - [Check Connectivity on Port TCP/2380](#check-connectivity-on-port-tcp-2380) -- [etcd Alarms](#etcd-alarms) -- [etcd Space Errors](#etcd-space-errors) -- [Log Level](#log-level) -- [etcd Content](#etcd-content) - - [Watch Streaming Events](#watch-streaming-events) - - [Query etcd Directly](#query-etcd-directly) -- [Replacing Unhealthy etcd Nodes](#replacing-unhealthy-etcd-nodes) - -# Checking if the etcd Container is Running - -The container for etcd should have status **Up**. The duration shown after **Up** is the time the container has been running. - -``` -docker ps -a -f=name=etcd$ -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -605a124503b9 rancher/coreos-etcd:v3.2.18 "/usr/local/bin/et..." 2 hours ago Up 2 hours etcd -``` - -# etcd Container Logging - -The logging of the container can contain information on what the problem could be. 
- -``` -docker logs etcd -``` -| Log | Explanation | -|-----|------------------| -| `health check for peer xxx could not connect: dial tcp IP:2380: getsockopt: connection refused` | A connection to the address shown on port 2380 cannot be established. Check if the etcd container is running on the host with the address shown. | -| `xxx is starting a new election at term x` | The etcd cluster has lost its quorum and is trying to establish a new leader. This can happen when the majority of the nodes running etcd go down/unreachable. | -| `connection error: desc = "transport: Error while dialing dial tcp 0.0.0.0:2379: i/o timeout"; Reconnecting to {0.0.0.0:2379 0 }` | The host firewall is preventing network communication. | -| `rafthttp: request cluster ID mismatch` | The node with the etcd instance logging `rafthttp: request cluster ID mismatch` is trying to join a cluster that has already been formed with another peer. The node should be removed from the cluster, and re-added. | -| `rafthttp: failed to find member` | The cluster state (`/var/lib/etcd`) contains wrong information to join the cluster. The node should be removed from the cluster, the state directory should be cleaned and the node should be re-added. - -# etcd Cluster and Connectivity Checks - -The address where etcd is listening depends on the address configuration of the host etcd is running on. If an internal address is configured for the host etcd is running on, the endpoint for `etcdctl` needs to be specified explicitly. If any of the commands respond with `Error: context deadline exceeded`, the etcd instance is unhealthy (either quorum is lost or the instance is not correctly joined in the cluster) - -### Check etcd Members on all Nodes - -Output should contain all the nodes with the `etcd` role and the output should be identical on all nodes. 
- -Command: -``` -docker exec etcd etcdctl member list -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list" -``` - -Example output: -``` -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -``` - -### Check Endpoint Status - -The values for `RAFT TERM` should be equal and `RAFT INDEX` should not be too far apart from each other. - -Command: -``` -docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl endpoint status --write-out table -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table -``` - -Example output: -``` -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| https://IP:2379 | 333ef673fc4add56 | 3.2.18 | 24 MB | false | 72 | 66887 | -| https://IP:2379 | 5feed52d940ce4cf | 3.2.18 | 24 MB | true | 72 | 66887 | -| https://IP:2379 | db6b3bdb559a848d | 3.2.18 | 25 MB | false | 72 | 66887 | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -``` - -### Check Endpoint Health - -Command: -``` -docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member 
list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl endpoint health -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl endpoint health --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") -``` - -Example output: -``` -https://IP:2379 is healthy: successfully committed proposal: took = 2.113189ms -https://IP:2379 is healthy: successfully committed proposal: took = 2.649963ms -https://IP:2379 is healthy: successfully committed proposal: took = 2.451201ms -``` - -### Check Connectivity on Port TCP/2379 - -Command: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5"); do - echo "Validating connection to ${endpoint}/health" - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" -done -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5"); do - echo "Validating connection to ${endpoint}/health"; - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" -done 
-``` - -Example output: -``` -Validating connection to https://IP:2379/health -{"health": "true"} -Validating connection to https://IP:2379/health -{"health": "true"} -Validating connection to https://IP:2379/health -{"health": "true"} -``` - -### Check Connectivity on Port TCP/2380 - -Command: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" -done -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" -done -``` - -Example output: -``` -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -``` - -# etcd Alarms - -etcd will trigger alarms, for instance when it runs out of space. 
- -Command: -``` -docker exec etcd etcdctl alarm list -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" -``` - -Example output when NOSPACE alarm is triggered: -``` -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -``` - -# etcd Space Errors - -Related error messages are `etcdserver: mvcc: database space exceeded` or `applying raft message exceeded backend quota`. Alarm `NOSPACE` will be triggered. - -Resolutions: - -- [Compact the Keyspace](#compact-the-keyspace) -- [Defrag All etcd Members](#defrag-all-etcd-members) -- [Check Endpoint Status](#check-endpoint-status) -- [Disarm Alarm](#disarm-alarm) - -### Compact the Keyspace - -Command: -``` -rev=$(docker exec etcd etcdctl endpoint status --write-out json | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]*') -docker exec etcd etcdctl compact "$rev" -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -rev=$(docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT endpoint status --write-out json | egrep -o '\"revision\":[0-9]*' | egrep -o '[0-9]*'") -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT compact \"$rev\"" -``` - -Example output: -``` -compacted revision xxx -``` - -### Defrag All etcd Members - -Command: -``` -docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl defrag -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl defrag --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste 
-sd ','")" -``` - -Example output: -``` -Finished defragmenting etcd member[https://IP:2379] -Finished defragmenting etcd member[https://IP:2379] -Finished defragmenting etcd member[https://IP:2379] -``` - -### Check Endpoint Status - -Command: -``` -docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl endpoint status --write-out table -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table" -``` - -Example output: -``` -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| https://IP:2379 | e973e4419737125 | 3.2.18 | 553 kB | false | 32 | 2449410 | -| https://IP:2379 | 4a509c997b26c206 | 3.2.18 | 553 kB | false | 32 | 2449410 | -| https://IP:2379 | b217e736575e9dd3 | 3.2.18 | 553 kB | true | 32 | 2449410 | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -``` - -### Disarm Alarm - -After verifying that the DB size went down after compaction and defragmenting, the alarm needs to be disarmed for etcd to allow writes again. 
- -Command: -``` -docker exec etcd etcdctl alarm list -docker exec etcd etcdctl alarm disarm -docker exec etcd etcdctl alarm list -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm disarm" -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" -``` - -Example output: -``` -docker exec etcd etcdctl alarm list -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -docker exec etcd etcdctl alarm disarm -docker exec etcd etcdctl alarm list -``` - -# Log Level - -The log level of etcd can be changed dynamically via the API. You can configure debug logging using the commands below. - -Command: -``` -docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINT)/config/local/log -``` - -To reset the log level back to the default (`INFO`), you 
can use the following command. - -Command: -``` -docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINT)/config/local/log -``` - -# etcd Content - -If you want to investigate the contents of your etcd, you can either watch streaming events or you can query etcd directly, see below for examples. - -### Watch Streaming Events - -Command: -``` -docker exec etcd etcdctl watch --prefix /registry -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT watch --prefix /registry -``` - -If you only want to see the affected keys (and not the binary data), you can append `| grep -a ^/registry` to the command to filter for keys only. 
- -### Query etcd Directly - -Command: -``` -docker exec etcd etcdctl get /registry --prefix=true --keys-only -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT get /registry --prefix=true --keys-only -``` - -You can process the data to get a summary of count per key, using the command below: - -``` -docker exec etcd etcdctl get /registry --prefix=true --keys-only | grep -v ^$ | awk -F'/' '{ if ($3 ~ /cattle.io/) {h[$3"/"$4]++} else { h[$3]++ }} END { for(k in h) print h[k], k }' | sort -nr -``` - -# Replacing Unhealthy etcd Nodes - -When a node in your etcd cluster becomes unhealthy, the recommended approach is to fix or remove the failed or unhealthy node before adding a new etcd node to the cluster. diff --git a/content/rancher/v2.5/en/troubleshooting/rke/kubernetes-resources/_index.md b/content/rancher/v2.5/en/troubleshooting/rke/kubernetes-resources/_index.md deleted file mode 100644 index 507573b7b69..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/rke/kubernetes-resources/_index.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: Kubernetes resources -weight: 5 ---- - -The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. 
- -- [Nodes](#nodes) - - [Get nodes](#get-nodes) - - [Get node conditions](#get-node-conditions) -- [Kubernetes leader election](#kubernetes-leader-election) - - [Kubernetes controller manager leader](#kubernetes-controller-manager-leader) - - [Kubernetes scheduler leader](#kubernetes-scheduler-leader) -- [Ingress controller](#ingress-controller) - - [Pod details](#pod-details) - - [Pod container logs](#pod-container-logs) - - [Namespace events](#namespace-events) - - [Debug logging](#debug-logging) - - [Check configuration](#check-configuration) -- [Rancher agents](#rancher-agents) - - [cattle-node-agent](#cattle-node-agent) - - [cattle-cluster-agent](#cattle-cluster-agent) -- [Jobs and pods](#jobs-and-pods) - - [Check that pods or jobs have status Running/Completed](#check-that-pods-or-jobs-have-status-running-completed) - - [Describe pod](#describe-pod) - - [Pod container logs](#pod-container-logs) - - [Describe job](#describe-job) - - [Logs from the containers of pods of the job](#logs-from-the-containers-of-pods-of-the-job) - - [Evicted pods](#evicted-pods) - - [Job does not complete](#job-does-not-complete) - -# Nodes - -### Get nodes - -Run the command below and check the following: - -- All nodes in your cluster should be listed, make sure there is not one missing. -- All nodes should have the **Ready** status (if not in **Ready** state, check the `kubelet` container logs on that node using `docker logs kubelet`) -- Check if all nodes report the correct version. 
-- Check if OS/Kernel/Docker values are shown as expected (possibly you can relate issues due to upgraded OS/Kernel/Docker) - - -``` -kubectl get nodes -o wide -``` - -Example output: - -``` -NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME -controlplane-0 Ready controlplane 31m v1.13.5 138.68.188.91 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -etcd-0 Ready etcd 31m v1.13.5 138.68.180.33 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -worker-0 Ready worker 30m v1.13.5 139.59.179.88 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -``` - -### Get node conditions - -Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) - -``` -kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' -``` - -Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) that are active that could prevent normal operation. - -``` -kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{if ne .type "Ready"}}{{if eq .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{else}}{{if ne .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{": "}}{{.status}}{{"\n"}}{{end}}{{end}}{{end}}{{end}}' -``` - -Example output: - -``` -worker-0: DiskPressure:True -``` - -# Kubernetes leader election - -### Kubernetes Controller Manager leader - -The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-controller-manager` endpoint (in this example, `controlplane-0`). 
- -``` -kubectl -n kube-system get endpoints kube-controller-manager -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' -{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> -``` - -### Kubernetes Scheduler leader - -The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-scheduler` endpoint (in this example, `controlplane-0`). - -``` -kubectl -n kube-system get endpoints kube-scheduler -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' -{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> -``` - -# Ingress Controller - -The default Ingress Controller is NGINX and is deployed as a DaemonSet in the `ingress-nginx` namespace. The pods are only scheduled to nodes with the `worker` role. - -Check if the pods are running on all nodes: - -``` -kubectl -n ingress-nginx get pods -o wide -``` - -Example output: - -``` -kubectl -n ingress-nginx get pods -o wide -NAME READY STATUS RESTARTS AGE IP NODE -default-http-backend-797c5bc547-kwwlq 1/1 Running 0 17m x.x.x.x worker-1 -nginx-ingress-controller-4qd64 1/1 Running 0 14m x.x.x.x worker-1 -nginx-ingress-controller-8wxhm 1/1 Running 0 13m x.x.x.x worker-0 -``` - -If a pod is unable to run (Status is not **Running**, Ready status is not showing `1/1` or you see a high count of Restarts), check the pod details, logs and namespace events. 
- -### Pod details - -``` -kubectl -n ingress-nginx describe pods -l app=ingress-nginx -``` - -### Pod container logs - -``` -kubectl -n ingress-nginx logs -l app=ingress-nginx -``` - -### Namespace events - -``` -kubectl -n ingress-nginx get events -``` - -### Debug logging - -To enable debug logging: - -``` -kubectl -n ingress-nginx patch ds nginx-ingress-controller --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--v=5"}]' -``` - -### Check configuration - -Retrieve generated configuration in each pod: - -``` -kubectl -n ingress-nginx get pods -l app=ingress-nginx --no-headers -o custom-columns=.NAME:.metadata.name | while read pod; do kubectl -n ingress-nginx exec $pod -- cat /etc/nginx/nginx.conf; done -``` - -# Rancher agents - -Communication to the cluster (Kubernetes API via `cattle-cluster-agent`) and communication to the nodes (cluster provisioning via `cattle-node-agent`) is done through Rancher agents. - -#### cattle-node-agent - -Check if the cattle-node-agent pods are present on each node, have status **Running** and don't have a high count of Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-node-agent-4gc2p 1/1 Running 0 2h x.x.x.x worker-1 -cattle-node-agent-8cxkk 1/1 Running 0 2h x.x.x.x etcd-1 -cattle-node-agent-kzrlg 1/1 Running 0 2h x.x.x.x etcd-0 -cattle-node-agent-nclz9 1/1 Running 0 2h x.x.x.x controlplane-0 -cattle-node-agent-pwxp7 1/1 Running 0 2h x.x.x.x worker-0 -cattle-node-agent-t5484 1/1 Running 0 2h x.x.x.x controlplane-1 -cattle-node-agent-t8mtz 1/1 Running 0 2h x.x.x.x etcd-2 -``` - -Check logging of a specific cattle-node-agent pod or all cattle-node-agent pods: - -``` -kubectl -n cattle-system logs -l app=cattle-agent -``` - -#### cattle-cluster-agent - -Check if the cattle-cluster-agent pod is present in the cluster, has status **Running** and doesn't have a high count of 
Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-cluster-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-cluster-agent-54d7c6c54d-ht9h4 1/1 Running 0 2h x.x.x.x worker-1 -``` - -Check logging of cattle-cluster-agent pod: - -``` -kubectl -n cattle-system logs -l app=cattle-cluster-agent -``` - -# Jobs and Pods - -### Check that pods or jobs have status **Running**/**Completed** - -To check, run the command: - -``` -kubectl get pods --all-namespaces -``` - -If a pod is not in **Running** state, you can dig into the root cause by running: - -### Describe pod - -``` -kubectl describe pod POD_NAME -n NAMESPACE -``` - -### Pod container logs - -``` -kubectl logs POD_NAME -n NAMESPACE -``` - -If a job is not in **Completed** state, you can dig into the root cause by running: - -### Describe job - -``` -kubectl describe job JOB_NAME -n NAMESPACE -``` - -### Logs from the containers of pods of the job - -``` -kubectl logs -l job-name=JOB_NAME -n NAMESPACE -``` - -### Evicted pods - -Pods can be evicted based on [eviction signals](https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/#eviction-policy). 
- -Retrieve a list of evicted pods (podname and namespace): - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' -``` - -To delete all evicted pods: - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace delete pod $epod; done -``` - -Retrieve a list of evicted pods, scheduled node and the reason: - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace get pod $epod -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,MSG:.status.message; done -``` - -### Job does not complete - -If you have enabled Istio, and you are having issues with a Job you deployed not completing, you will need to add an annotation to your pod using [these steps.](../../cluster-admin/tools/istio/setup/enable-istio-in-namespace/#excluding-workloads-from-being-injected-with-the-istio-sidecar) - -Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. This is a temporary workaround and will disable Istio for any traffic to/from the annotated Pod. Keep in mind this may not allow you to continue to use a Job for integration testing, as the Job will not have access to the service mesh. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/troubleshooting/rke/nginx-proxy/_index.md b/content/rancher/v2.5/en/troubleshooting/rke/nginx-proxy/_index.md deleted file mode 100644 index 70505e96280..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/rke/nginx-proxy/_index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Troubleshooting nginx-proxy -weight: 3 ---- - -The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. - -# Check if the Container is Running - -The container is called `nginx-proxy` and should have status `Up`. The duration shown after `Up` is the time the container has been running. - -``` -docker ps -a -f=name=nginx-proxy -``` - -Example output: - -``` -docker ps -a -f=name=nginx-proxy -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -c3e933687c0e rancher/rke-tools:v0.1.15 "nginx-proxy CP_HO..." 3 hours ago Up 3 hours nginx-proxy -``` - -# Check Generated NGINX Configuration - -The generated configuration should include the IP addresses of the nodes with the `controlplane` role. The configuration can be checked using the following command: - -``` -docker exec nginx-proxy cat /etc/nginx/nginx.conf -``` - -Example output: -``` -error_log stderr notice; - -worker_processes auto; -events { - multi_accept on; - use epoll; - worker_connections 1024; -} - -stream { - upstream kube_apiserver { - - server ip_of_controlplane_node1:6443; - - server ip_of_controlplane_node2:6443; - - } - - server { - listen 6443; - proxy_pass kube_apiserver; - proxy_timeout 30; - proxy_connect_timeout 2s; - - } - -} -``` - -# nginx-proxy Container Logging - -The logging of the containers can contain information on what the problem could be. 
- -``` -docker logs nginx-proxy -``` \ No newline at end of file diff --git a/content/rancher/v2.5/en/troubleshooting/rke/rancher_rke/_index.md b/content/rancher/v2.5/en/troubleshooting/rke/rancher_rke/_index.md deleted file mode 100644 index 5a96d695015..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/rke/rancher_rke/_index.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Troubleshooting Rancher Installed on a Local RKE Cluster -weight: 104 ---- - -The commands/steps listed on this page are intended to be used to troubleshoot an RKE Kubernetes cluster that Rancher is installed on. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml`). - -### Check Rancher pods - -Rancher pods are deployed as a Deployment in the `cattle-system` namespace. - -Check if the pods are running on all nodes: - -``` -kubectl -n cattle-system get pods -l app=rancher -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -rancher-7dbd7875f7-n6t5t 1/1 Running 0 8m x.x.x.x x.x.x.x -rancher-7dbd7875f7-qbj5k 1/1 Running 0 8m x.x.x.x x.x.x.x -rancher-7dbd7875f7-qw7wb 1/1 Running 0 8m x.x.x.x x.x.x.x -``` - -If a pod is unable to run (Status is not **Running**, Ready status is not showing `1/1` or you see a high count of Restarts), check the pod details, logs and namespace events. - -#### Pod details - -``` -kubectl -n cattle-system describe pods -l app=rancher -``` - -#### Pod container logs - -``` -kubectl -n cattle-system logs -l app=rancher -``` - -#### Namespace events - -``` -kubectl -n cattle-system get events -``` - -### Check ingress - -Ingress should have the correct `HOSTS` (showing the configured FQDN) and `ADDRESS` (host address(es) it will be routed to). 
- -``` -kubectl -n cattle-system get ingress -``` - -Example output: - -``` -NAME HOSTS ADDRESS PORTS AGE -rancher rancher.yourdomain.com x.x.x.x,x.x.x.x,x.x.x.x 80, 443 2m -``` - -### Check ingress controller logs - -When accessing your configured Rancher FQDN does not show you the UI, check the ingress controller logging to see what happens when you try to access Rancher: - -``` -kubectl -n ingress-nginx logs -l app=ingress-nginx -``` - -### Leader election - -The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `cattle-controllers` ConfigMap (in this example, `rancher-7dbd7875f7-qbj5k`). - -``` -kubectl -n kube-system get configmap cattle-controllers -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' -{"holderIdentity":"rancher-7dbd7875f7-qbj5k","leaseDurationSeconds":45,"acquireTime":"2019-04-04T11:53:12Z","renewTime":"2019-04-04T12:24:08Z","leaderTransitions":0} -``` - diff --git a/content/rancher/v2.5/en/troubleshooting/rke/worker-and-generic/_index.md b/content/rancher/v2.5/en/troubleshooting/rke/worker-and-generic/_index.md deleted file mode 100644 index 28ee4499bb4..00000000000 --- a/content/rancher/v2.5/en/troubleshooting/rke/worker-and-generic/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Troubleshooting Worker Nodes and Generic Components -weight: 4 ---- - -This section applies to every node as it includes components that run on nodes with any role. - -# Check if the Containers are Running - -There are two specific containers launched on nodes with the `worker` role: - -* kubelet -* kube-proxy - -The containers should have status `Up`. The duration shown after `Up` is the time the container has been running. - -``` -docker ps -a -f=name='kubelet|kube-proxy' -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -158d0dcc33a5 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 
3 hours ago Up 3 hours kube-proxy -a30717ecfb55 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kubelet -``` - -# Container Logging - -The logging of the containers can contain information on what the problem could be. - -``` -docker logs kubelet -docker logs kube-proxy -``` diff --git a/content/rancher/v2.5/en/user-preferences/_index.md b/content/rancher/v2.5/en/user-preferences/_index.md deleted file mode 100644 index 2695a5a3129..00000000000 --- a/content/rancher/v2.5/en/user-preferences/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: User Preferences -weight: 18 ---- - -> This page is under construction. - -Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. - -![User Settings Menu]({{}}/img/rancher/user-settings.png) - -The available user settings are: - -- [API & Keys]({{}}/rancher/v2.x/en/user-settings/api-keys/): If you want to interact with Rancher programmatically, you need an API key. Follow the directions in this section to obtain a key. -- [Cloud Credentials]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/): Manage cloud credentials [used by node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to [provision nodes for clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters). Note: Available as of v2.2.0. -- [Node Templates]({{}}/rancher/v2.x/en/user-settings/node-templates): Manage templates [used by Rancher to provision nodes for clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters). -- [Preferences]({{}}/rancher/v2.x/en/user-settings/preferences): Sets superficial preferences for the Rancher UI. -- Log Out: Ends your user session. - -# Preferences - -Each user can choose preferences to personalize their Rancher experience. 
To change preference settings, open the **User Settings** menu and then select **Preferences**. - -## Theme - -Choose your background color for the Rancher UI. If you choose **Auto**, the background color changes from light to dark at 6 PM, and then changes back at 6 AM. - -## My Account - -This section displays the **Name** (your display name) and **Username** (your login) used for your session. To change your login's current password, click the **Change Password** button. - -## Table Row per Page - -On pages that display system objects like clusters or deployments in a table, you can set the number of objects that display on the page before you must paginate. The default setting is `50`. diff --git a/content/rancher/v2.x/_index.md b/content/rancher/v2.x/_index.md index f0bb89915cf..fde0e8a648a 100644 --- a/content/rancher/v2.x/_index.md +++ b/content/rancher/v2.x/_index.md @@ -1,4 +1,4 @@ --- -title: v2.0-v2.4 +title: v2.x showBreadcrumb: false --- diff --git a/content/rancher/v2.x/en/_index.md b/content/rancher/v2.x/en/_index.md index 850af59c5f6..685ceec51a5 100644 --- a/content/rancher/v2.x/en/_index.md +++ b/content/rancher/v2.x/en/_index.md @@ -1,17 +1,14 @@ --- -title: "Rancher v2.0-v2.4 Documentation" -shortTitle: "Rancher 2.0-2.4" +title: "Rancher 2.x" +shortTitle: "Rancher 2.x" description: "Rancher adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." -metaTitle: "Rancher 2.0-2.4 Docs" +metaTitle: "Rancher 2.x Docs: What is New?" metaDescription: "Rancher 2 adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." insertOneSix: true weight: 1 ctaBanner: 0 --- - -> The Rancher v2.5 docs are [here.]({{}}/rancher/v2.5/en) - -Rancher was originally built to work with multiple orchestrators, and it included its own orchestrator called Cattle. 
With the rise of Kubernetes in the marketplace, Rancher 2 exclusively deploys and manages Kubernetes clusters running anywhere, on any provider. +Rancher was originally built to work with multiple orchestrators, and it included its own orchestrator called Cattle. With the rise of Kubernetes in the marketplace, Rancher 2.x exclusively deploys and manages Kubernetes clusters running anywhere, on any provider. Rancher can provision Kubernetes from a hosted provider, provision compute nodes and then install Kubernetes onto them, or import existing Kubernetes clusters running anywhere. diff --git a/content/rancher/v2.x/en/cluster-admin/tools/opa-gatekeper/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/opa-gatekeper/_index.md index 6199ccd4bc7..dceb610f935 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/opa-gatekeper/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/opa-gatekeper/_index.md @@ -47,8 +47,6 @@ When OPA Gatekeeper is enabled, Rancher installs some templates by default. To list the constraint templates installed in the cluster, go to the left side menu under OPA Gatekeeper and click on **Templates.** -For detailed steps on how to create constraints using these default templates, please refer to [this section.](#using-the-default-constraint-templates) - Rancher also provides the ability to create your own constraint templates by importing YAML definitions. # Creating and Configuring Constraints @@ -99,115 +97,3 @@ The detail view of each constraint lists information about the resource that vio **Result:** Upon disabling OPA Gatekeeper, all constraint templates and constraints will also be deleted. -# Using the Default Constraint Templates - -When OPA Gatekeeper is enabled, Rancher installs some templates by default. Currently the following constraint templates are installed: - -1. `k8sallowedrepos` - Template that can be used to whitelist registries. -1. 
`k8srequiredlabels` - Template that can be used to enforce desired labels on specific Kubernetes objects. - -This section describes how to use these templates to create constraints for enforcing certain policies on the cluster via the **Dashboard** view. - -Currently it is not possible to create a constraint via "Edit as Form" by passing non-scalar parameters, but one can create them by using the **Edit As YAML** option. - -The constraint created can be edited using the form. - -### Constraint to Whitelist Registries - -Suppose you want to apply a policy that restricts creation of any pods in your cluster to use images only from a whitelisted repository. In this case, you would create a constraint from the `k8sallowedrepos` template. - -For example, suppose we want all pods launched in the namespace `test` to use images only from the quay.io registry. - -Steps to do that via Rancher's dashboard view are as follows: - -> **Prerequisite:** OPA Gatekeeper must be enabled using the cluster's Dashboard view. - -1. Navigate to **OPA Gatekeeper > Constraints > Create.** -1. Use the **Edit As YAML** option on the right hand corner of the Create form. Paste the following YAML and click **Create** to add the constraint: - - ```yaml - type: constraints.gatekeeper.sh.k8sallowedrepos - spec: - match: - excludedNamespaces: - - cattle-system - - gatekeeper-system - - ingress-nginx - - kube-node-lease - - kube-public - - kube-system - - security-scan - kinds: - - apiGroups: [""] - kinds: ["Pod"] - namespaces: - - "test" - labelSelector: - matchExpressions: [] - namespaceSelector: - matchExpressions: [] - parameters: - repos: - - "quay.io" - enforcementAction: deny - metadata: - name: test-repo-is-quay-io - annotations: - cattle.io/description: whitelist repo quay.io - ``` - - The YAML specifies the directive `repos` as defined by the `k8sallowedrepos` template schema. 
- - This constraint specifies that the image repository "quay.io" should be used by all pod objects in the `test` namespace. Notice that all the system namespaces are by default added to the list of `excludedNamespaces`. You can edit the constraint via the form to change the namespaces and other information. - -**Result:** After the above constraint is created, it will be listed under `K8sAllowedRepos` on the **Constraints** page. Now if you navigate **Back to Rancher** and create a workload under the `test` namespace with any image other than the "quay.io" registry, you will get an error from OPA Gatekeeper. - -### Constraint to Enforce Labels - -Suppose you want to apply a policy that requires certain set of labels present on Kubernetes resources of a specific kind, then you can create a constraint from the `k8srequiredlabels` template. - -For example, suppose we want all namespaces in the cluster to be labeled with the name of a team. - -Steps to do that via Rancher's dashboard view are as follows: - -> **Prerequisite:** OPA Gatekeeper must be enabled using the cluster's Dashboard view. - -1. Navigate to OPA Gatekeeper > Constraints > Create. -1. Use the **Edit As YAML** option on the right hand corner of the Create form. 
Paste the following yaml and click **Create** to add the constraint: - - ```yaml - type: constraints.gatekeeper.sh.k8srequiredlabels - spec: - match: - excludedNamespaces: - - cattle-system - - gatekeeper-system - - ingress-nginx - - kube-node-lease - - kube-public - - kube-system - - security-scan - kinds: - - apiGroups: [""] - kinds: ["Namespace"] - labelSelector: - matchExpressions: [] - namespaceSelector: - matchExpressions: [] - parameters: - message: "All namespaces must have a `team` label that points to your team name" - labels: - - key: team - allowedRegex: "^[a-zA-Z]+$" - enforcementAction: deny - metadata: - name: ns-must-have-label - annotations: - cattle.io/description: constraint for ns label - ``` - - The YAML specifies the directives `message` and `labels` as defined by the `k8srequiredlabels` template schema. This constraint defines that all"namespaces must have a label `team` whose value should match the given "allowedRegex". Notice that all the system namespaces are by default added to list of `excludedNamespaces`. - - You can edit the constraint via the form to change the namespaces and other information. - -**Result:** After the above constraint is created, it will be listed under `K8sRequiredLabels` on the **Constraints** page. Now if you navigate **Back to Rancher** and create a new namespace in the cluster without having the label `team`, the create request should be denied. 
diff --git a/content/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table/index.md b/content/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table/index.md deleted file mode 100644 index b3560c76223..00000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table/index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -headless: true ---- -| Action | [Rancher launched Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) | [Hosted Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) | [Imported Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters) | -| --- | --- | ---| ---| -| [Using kubectl and a kubeconfig file to Access a Cluster]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) | ✓ | ✓ | ✓ | -| [Managing Cluster Members]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/) | ✓ | ✓ | ✓ | -| [Editing and Upgrading Clusters]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) | ✓ | ✓ | * | -| [Managing Nodes]({{}}/rancher/v2.x/en/cluster-admin/nodes) | ✓ | ✓ | ✓ | -| [Managing Persistent Volumes and Storage Classes]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) | ✓ | ✓ | ✓ | -| [Managing Projects, Namespaces and Workloads]({{}}/rancher/v2.x/en/cluster-admin/projects-and-namespaces/) | ✓ | ✓ | ✓ | -| [Using App Catalogs]({{}}/rancher/v2.x/en/catalog/) | ✓ | ✓ | ✓ | -| [Configuring Tools (Alerts, Notifiers, Logging, Monitoring, Istio)]({{}}/rancher/v2.x/en/cluster-admin/tools/) | ✓ | ✓ | ✓ | -| [Cloning Clusters]({{}}/rancher/v2.x/en/cluster-admin/cloning-clusters/)| ✓ | ✓ | | -| [Ability to rotate certificates]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/) | ✓ | | | -| [Ability to back up your Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/) | ✓ | | | -| [Ability to recover and restore etcd]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/) | ✓ | | | -| [Cleaning Kubernetes 
components when clusters are no longer reachable from Rancher]({{}}/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/) | ✓ | | | -| [Configuring Pod Security Policies]({{}}/rancher/v2.x/en/cluster-admin/pod-security-policy/) | ✓ | | | -| [Running Security Scans]({{}}/rancher/v2.x/en/security/security-scan/) | ✓ | | | - -/* Cluster configuration options can't be edited for imported clusters, except for [K3s clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/#additional-features-for-imported-k3s-clusters) diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md index e2da323ee7a..5a84152ebef 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md @@ -1,6 +1,6 @@ --- title: Setting up Clusters from Hosted Kubernetes Providers -weight: 2100 +weight: 3 --- In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md index ec106076c14..5bb0f86029f 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md @@ -26,7 +26,7 @@ Rancher needs access to your AWS account in order to provision and administer yo 1. Create a user with programmatic access by following the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). -2. Next, create an IAM policy that defines what this user has access to in your AWS account. 
The required permissions are [here.]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/#appendix-minimum-eks-permissions) Follow the steps [here](https://docs.aws.amazon.com/eks/latest/userguide/EKS_IAM_user_policies.html) to create an IAM policy and attach it to your user. +2. Next, create an IAM policy that defines what this user has access to in your AWS account. It's important to only grant this user minimal access within your account. Follow the steps [here](https://docs.aws.amazon.com/eks/latest/userguide/EKS_IAM_user_policies.html) to create an IAM policy and attach it to your user. 3. Finally, follow the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) to create an access key and secret key for this user. @@ -263,8 +263,7 @@ Resource targeting uses `*` as the ARN of many of the resources created cannot b "cloudformation:DescribeStackResources", "cloudformation:DescribeStacks", "cloudformation:ListStacks", - "cloudformation:CreateStack", - "cloudformation:DeleteStack" + "cloudformation:CreateStack" ], "Resource": "*" }, diff --git a/content/rancher/v2.x/en/cluster-provisioning/imported-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/imported-clusters/_index.md index 8f59cbb3348..5f37d312224 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/imported-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/imported-clusters/_index.md @@ -1,9 +1,9 @@ --- -title: Importing Existing Clusters into Rancher +title: Importing Existing Clusters description: Learn how you can create a cluster in Rancher by importing an existing Kubernetes cluster. 
Then, you can manage it using Rancher metaTitle: 'Kubernetes Cluster Management' metaDescription: 'Learn how you can import an existing Kubernetes cluster and then manage it using Rancher' -weight: 2300 +weight: 5 aliases: - /rancher/v2.x/en/tasks/clusters/import-cluster/ --- diff --git a/content/rancher/v2.x/en/cluster-provisioning/production/_index.md b/content/rancher/v2.x/en/cluster-provisioning/production/_index.md index d5ab40db6fd..2aea2df7329 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/production/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/production/_index.md @@ -1,6 +1,6 @@ --- title: Checklist for Production-Ready Clusters -weight: 2005 +weight: 2 --- In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. diff --git a/content/rancher/v2.x/en/cluster-provisioning/registering/_index.md b/content/rancher/v2.x/en/cluster-provisioning/registering/_index.md new file mode 100644 index 00000000000..ee7821d0029 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/registering/_index.md @@ -0,0 +1,4 @@ +--- +title: Registering K3s, Rancher or EKS Clusters +weight: 6 +--- \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md index 111b0a58faa..ce7512f5ba7 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md @@ -1,6 +1,6 @@ --- title: Launching Kubernetes with Rancher -weight: 2200 +weight: 4 --- You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. 
It can launch Kubernetes on any computers, including: diff --git a/content/rancher/v2.x/en/dashboard/_index.md b/content/rancher/v2.x/en/dashboard/_index.md new file mode 100644 index 00000000000..15c9ffb624f --- /dev/null +++ b/content/rancher/v2.x/en/dashboard/_index.md @@ -0,0 +1,4 @@ +--- +title: Dashboard +weight: 5000 +--- \ No newline at end of file diff --git a/content/rancher/v2.x/en/dashboard/backup-restore/_index.md b/content/rancher/v2.x/en/dashboard/backup-restore/_index.md new file mode 100644 index 00000000000..e3e41e6e16b --- /dev/null +++ b/content/rancher/v2.x/en/dashboard/backup-restore/_index.md @@ -0,0 +1,4 @@ +--- +title: Backup and Restore +weight: 7 +--- \ No newline at end of file diff --git a/content/rancher/v2.x/en/dashboard/cis-scans/_index.md b/content/rancher/v2.x/en/dashboard/cis-scans/_index.md new file mode 100644 index 00000000000..15ced365507 --- /dev/null +++ b/content/rancher/v2.x/en/dashboard/cis-scans/_index.md @@ -0,0 +1,4 @@ +--- +title: CIS Security Scans +weight: 6 +--- diff --git a/content/rancher/v2.x/en/dashboard/cluster-explorer/_index.md b/content/rancher/v2.x/en/dashboard/cluster-explorer/_index.md new file mode 100644 index 00000000000..006d87e6e3b --- /dev/null +++ b/content/rancher/v2.x/en/dashboard/cluster-explorer/_index.md @@ -0,0 +1,4 @@ +--- +title: Cluster Explorer +weight: 1 +--- \ No newline at end of file diff --git a/content/rancher/v2.x/en/dashboard/istio/_index.md b/content/rancher/v2.x/en/dashboard/istio/_index.md new file mode 100644 index 00000000000..fb765119eb0 --- /dev/null +++ b/content/rancher/v2.x/en/dashboard/istio/_index.md @@ -0,0 +1,4 @@ +--- +title: Istio +weight: 4 +--- \ No newline at end of file diff --git a/content/rancher/v2.5/en/logging/_index.md b/content/rancher/v2.x/en/dashboard/logging/_index.md similarity index 94% rename from content/rancher/v2.5/en/logging/_index.md rename to content/rancher/v2.x/en/dashboard/logging/_index.md index 41cca5d4138..cdd2711b5e6 100644 --- 
a/content/rancher/v2.5/en/logging/_index.md +++ b/content/rancher/v2.x/en/dashboard/logging/_index.md @@ -3,12 +3,12 @@ title: Rancher Integration with Logging Services shortTitle: Logging description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. metaDescription: "Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster." -weight: 9 +weight: 3 --- - [Changes in Rancher v2.5](#changes-in-rancher-v2-5) - [Configuring the Logging Output for the Rancher Kubernetes Cluster](#configuring-the-logging-output-for-the-rancher-kubernetes-cluster) -- [Enabling Logging for Rancher Managed Clusters with ECM](#enabling-logging-for-rancher-managed-clusters-with-ecm) +- [Enabling Logging for Rancher Managed Clusters](#enabling-logging-for-rancher-managed-clusters) - [Configuring the Logging Application](#configuring-the-logging-application) @@ -35,7 +35,7 @@ If you install Rancher as a Helm chart, you'll configure the Helm chart options If you install Rancher using the Rancher CLI on an Linux OS, the Rancher Helm chart will be installed on a Kubernetes cluster with default options. Then when the Rancher UI is available, you'll enable the logging app from the Apps section of the UI. Then during the process of installing the logging application, you will configure the logging output. -### Enabling Logging for Rancher Managed Clusters with ECM +### Enabling Logging for Rancher Managed Clusters If you have Enterprise Cluster Manager enabled, you can enable the logging for a Rancher managed cluster by going to the Apps page and installing the logging app. 
diff --git a/content/rancher/v2.5/en/monitoring/_index.md b/content/rancher/v2.x/en/dashboard/monitoring-alerting/_index.md similarity index 52% rename from content/rancher/v2.5/en/monitoring/_index.md rename to content/rancher/v2.x/en/dashboard/monitoring-alerting/_index.md index fcfeb1b2d08..dce5a8af638 100644 --- a/content/rancher/v2.5/en/monitoring/_index.md +++ b/content/rancher/v2.x/en/dashboard/monitoring-alerting/_index.md @@ -1,8 +1,8 @@ --- title: Integrating Rancher and Prometheus for Cluster Monitoring -shortTitle: Monitoring +shortTitle: Monitoring/Alerting description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring -weight: 10 +weight: 2 --- Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. 
@@ -15,21 +15,25 @@ This section covers the following topics: - [About Prometheus](#about-prometheus) - [Monitoring scope](#monitoring-scope) - [Enabling cluster monitoring](#enabling-cluster-monitoring) -- [Resource consumption](#resource-consumption) - - [Resource consumption of Prometheus pods](#resource-consumption-of-prometheus-pods) - - [Resource consumption of other pods](#resources-consumption-of-other-pods) +- [Configuration](#configuration) +- [Examples](#examples) + - [Create ServiceMonitor Custom Resource](#create-servicemonitor-custom-resource) + - [PodMonitor](#podmonitor) + - [PrometheusRule](#prometheusrule) + - [Alertmanager Config](#alertmanager-config) + - [Configuring a Persistent Grafana Dashboard](#configuring-a-persistent-grafana-dashboard) + - [Configuring Grafana to Use Multiple Data Sources](#configuring-grafana-to-use-multiple-data-sources) + # Changes in Rancher v2.5 -Rancher's monitoring application is now powered by the Prometheus operator and relies less on Rancher's in-house monitoring tools. +Rancher's monitoring application is powered by the Prometheus operator, and it now relies less on Rancher's in-house monitoring tools. -This change allows Rancher to automatically support new features of the Prometheus operator API. +This change allows Rancher to automatically support new features of the Prometheus operator API. Now all of the features exposed by the upstream Prometheus operator are available in the monitoring application, and you have more flexibility to configure monitoring. Previously, you would use the Rancher UI to configure monitoring. The Rancher UI created CRDs that were maintained by Rancher and updated the Prometheus state. In Rancher v2.5, you directly create CRDs for the monitoring application, and those CRDs are exposed in the Rancher UI. 
-For information on configuring custom Prometheus metrics and alerting rules, refer to the upstream documentation for the [Prometheus operator.](https://github.com/prometheus-operator/prometheus-operator) This documentation can also help you set up RBAC, Thanos, or custom configuration. - -The Rancher monitoring application's Helm chart comes with a README that provides documentation. If you want to set up monitoring with advanced features, you can enable them when deploying the application. +The differences between Rancher's monitoring feature and the upstream Prometheus operator can be found in the [changelog.](https://github.com/rancher/charts/blob/dev-v2.5/packages/rancher-monitoring/overlay/CHANGELOG.md) # About Prometheus @@ -56,3 +60,39 @@ As an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-p > **Prerequisite:** Make sure that you are allowing traffic on port 9796 for each of your nodes because Prometheus will scrape metrics from here. > The default username and password for the Grafana instance will be `admin/admin`. However, Grafana dashboards are served via the Rancher authentication proxy, so only users who are currently authenticated into the Rancher server have access to the Grafana dashboard. + +# Configuration + +For information on configuring custom Prometheus metrics and alerting rules, refer to the upstream documentation for the [Prometheus operator.](https://github.com/prometheus-operator/prometheus-operator) This documentation can help you set up RBAC, Thanos, or custom configuration. 
+ +To create an additional scrape configuration, refer to [this page.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/additional-scrape-config.md) + +# Examples + +### Create ServiceMonitor Custom Resource + +An example ServiceMonitor custom resource can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + +### PodMonitor + +An example PodMonitor can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/example-app-pod-monitor.yaml) and an example Prometheus resource that refers to it can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/prometheus-pod-monitor.yaml) + +### PrometheusRule + +Prometheus rule files are held in PrometheusRule custom resources. Use the label selector field ruleSelector in the Prometheus object to define the rule files that you want to be mounted into Prometheus. An example PrometheusRule is on [this page.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md) + +### Alertmanager Config + +The Prometheus Operator introduces an Alertmanager resource, which allows users to declaratively describe an Alertmanager cluster. + +The upstream Prometheus documentation includes information on how to [set up](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md) and [configure](https://prometheus.io/docs/alerting/latest/configuration/) Alertmanager. + +### Configuring a Persistent Grafana Dashboard + +To allow the Grafana dashboard to persist after it restarts, you will need to add the configuration JSON into a ConfigMap. + +You can add this configuration to the ConfigMap using the Rancher UI. 
+ +### Configuring Grafana to Use Multiple Data Sources + +The data from Prometheus is used as the data source for the Grafana dashboard. Multiple data sources can be configured for Grafana. \ No newline at end of file diff --git a/content/rancher/v2.x/en/dashboard/opa/_index.md b/content/rancher/v2.x/en/dashboard/opa/_index.md new file mode 100644 index 00000000000..3ea91a4c4c9 --- /dev/null +++ b/content/rancher/v2.x/en/dashboard/opa/_index.md @@ -0,0 +1,4 @@ +--- +title: OPA +weight: 5 +--- \ No newline at end of file diff --git a/content/rancher/v2.x/en/dashboard/repos/_index.md b/content/rancher/v2.x/en/dashboard/repos/_index.md new file mode 100644 index 00000000000..6e952ef3484 --- /dev/null +++ b/content/rancher/v2.x/en/dashboard/repos/_index.md @@ -0,0 +1,4 @@ +--- +title: Repos +weight: 8 +--- \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/k8s-install/helm-rancher/_index.md b/content/rancher/v2.x/en/installation/k8s-install/helm-rancher/_index.md index be3c42f9c0c..74aa80b7789 100644 --- a/content/rancher/v2.x/en/installation/k8s-install/helm-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/k8s-install/helm-rancher/_index.md @@ -89,7 +89,7 @@ These instructions are adapted from the [official cert-manager documentation](ht ``` # Install the CustomResourceDefinition resources separately -kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.14/deploy/manifests/00-crds.yaml +kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.15.0/cert-manager.crds.yaml # **Important:** # If you are running Kubernetes v1.15 or below, you @@ -114,11 +114,7 @@ helm repo update helm install \ cert-manager jetstack/cert-manager \ --namespace cert-manager \ -<<<<<<< HEAD - --version v0.14.2 -======= --version v0.15.0 ->>>>>>> jetstack cert-manager doc update ``` Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager 
namespace for running pods: diff --git a/content/rancher/v2.x/en/installation/options/air-gap-helm2/install-rancher/_index.md b/content/rancher/v2.x/en/installation/options/air-gap-helm2/install-rancher/_index.md index 1767e4b7eb6..c1798f6ac09 100644 --- a/content/rancher/v2.x/en/installation/options/air-gap-helm2/install-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/options/air-gap-helm2/install-rancher/_index.md @@ -87,12 +87,12 @@ By default, Rancher generates a CA and uses cert-manager to issue the certificat 1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). ```plain - helm fetch jetstack/cert-manager --version v0.14.2 + helm fetch jetstack/cert-manager --version v0.12.0 ``` 1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. ```plain - helm template ./cert-manager-v0.14.2.tgz --output-dir . \ + helm template ./cert-manager-v0.12.0.tgz --output-dir . \ --name cert-manager --namespace cert-manager \ --set image.repository=/quay.io/jetstack/cert-manager-controller --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook @@ -101,7 +101,7 @@ By default, Rancher generates a CA and uses cert-manager to issue the certificat 1. Download the required CRD file for cert-manager ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.14/deploy/manifests/00-crds.yaml + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml ``` 1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. 
Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. diff --git a/content/rancher/v2.x/en/installation/options/air-gap-helm2/populate-private-registry/_index.md b/content/rancher/v2.x/en/installation/options/air-gap-helm2/populate-private-registry/_index.md index d5f39548c7c..b96ca100b47 100644 --- a/content/rancher/v2.x/en/installation/options/air-gap-helm2/populate-private-registry/_index.md +++ b/content/rancher/v2.x/en/installation/options/air-gap-helm2/populate-private-registry/_index.md @@ -58,7 +58,7 @@ In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS ```plain helm repo add jetstack https://charts.jetstack.io helm repo update - helm fetch jetstack/cert-manager --version v0.14.2 + helm fetch jetstack/cert-manager --version v0.12.0 helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt ``` @@ -220,7 +220,7 @@ The workstation must have Docker 18.02+ in order to support manifests, which are ```plain helm repo add jetstack https://charts.jetstack.io helm repo update - helm fetch jetstack/cert-manager --version v0.14.2 + helm fetch jetstack/cert-manager --version v0.12.0 helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt ``` diff --git a/content/rancher/v2.x/en/installation/options/chart-options/_index.md b/content/rancher/v2.x/en/installation/options/chart-options/_index.md index e03b089d8b4..5cec85b61e6 100644 --- a/content/rancher/v2.x/en/installation/options/chart-options/_index.md +++ b/content/rancher/v2.x/en/installation/options/chart-options/_index.md @@ -139,7 +139,7 @@ If you have private registries, catalogs or a proxy that intercepts certificates Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. 
```plain -kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem +kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem=./ca-additional.pem ``` ### Private Registry and Air Gap Installs diff --git a/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/_index.md b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/_index.md index 3aba76da34c..ddd8d3db46e 100644 --- a/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/_index.md @@ -79,7 +79,7 @@ These instructions are adapted from the [official cert-manager documentation](ht helm install \ --name cert-manager \ --namespace cert-manager \ - --version v0.14.2 \ + --version v0.12.0 \ jetstack/cert-manager ``` diff --git a/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/_index.md b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/_index.md index e773074fc13..261365488ac 100644 --- a/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/_index.md @@ -135,7 +135,7 @@ If you have private registries, catalogs or a proxy that intercepts certificates Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. 
```plain -kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem +kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem=./ca-additional.pem ``` ### Private Registry and Air Gap Installs diff --git a/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets/_index.md b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets/_index.md index 403cb958356..65a9b8435fa 100644 --- a/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets/_index.md @@ -27,9 +27,7 @@ If you are using a private CA, Rancher requires a copy of the CA certificate whi Copy the CA certificate into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. ->**Important:** Make sure the file is called `cacerts.pem` as Rancher uses that filename to configure the CA certificate. - ``` kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem + --from-file=cacerts.pem=./cacerts.pem ``` diff --git a/content/rancher/v2.x/en/installation/options/tls-secrets/_index.md b/content/rancher/v2.x/en/installation/options/tls-secrets/_index.md index 933bd4d5645..9693d584b0b 100644 --- a/content/rancher/v2.x/en/installation/options/tls-secrets/_index.md +++ b/content/rancher/v2.x/en/installation/options/tls-secrets/_index.md @@ -28,11 +28,9 @@ If you are using a private CA, Rancher requires a copy of the CA certificate whi Copy the CA certificate into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. ->**Important:** Make sure the file is called `cacerts.pem` as Rancher uses that filename to configure the CA certificate. 
- ``` kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem + --from-file=cacerts.pem=./cacerts.pem ``` > **Note:** The configured `tls-ca` secret is retrieved when Rancher starts. On a running Rancher installation the updated CA will take effect after new Rancher pods are started. diff --git a/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/_index.md b/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/_index.md index e0afe6897d1..2f224f311b3 100644 --- a/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/_index.md +++ b/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/_index.md @@ -8,7 +8,6 @@ Rancher uses cert-manager to automatically generate and renew TLS certificates f 1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) 1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). This change has no exact deadline. 1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) -1. [Cert-manager is changing the Deployment selector](https://cert-manager.io/docs/installation/upgrading/upgrading-0.13-0.14/) To address these changes, this guide will do two things: @@ -19,21 +18,14 @@ To address these changes, this guide will do two things: > If you are currently running the cert-manger whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: > 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server -> 2. Uninstall Rancher, cert-manager and the CustomResourceDefinition for cert-manager -> 3. 
Install the new CustomResourceDefinition for cert-manager, cert-manager and the newer version of Rancher +> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager +> 3. Install the newer version of Rancher and cert-manager > The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. > For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.x/en/upgrades/upgrades/ha/#c-upgrade-rancher) under the upgrade Rancher section. -## Upgrade Cert-Manager Only - -If you are running cert-manager version 0.11.x or above, please follow official [upgrading doc](https://cert-manager.io/docs/installation/upgrading/#upgrading-with-helm) to upgrade it using helm. - -Continue reading if you are coming from a cert-manager version older than 0.11 - -> **Note:** -> These instructions are applied if you have no plan to upgrade Rancher. +## Upgrade Cert-Manager The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. @@ -67,7 +59,7 @@ In order to upgrade cert-manager, follow these instructions: 1. 
Install the CustomResourceDefinition resources separately ```plain - kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.14/deploy/manifests/00-crds.yaml + kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml ``` > **Note:** @@ -97,7 +89,7 @@ In order to upgrade cert-manager, follow these instructions: helm install \ cert-manager jetstack/cert-manager \ --namespace cert-manager \ - --version v0.14.2 + --version v0.12.0 ``` 1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) @@ -125,7 +117,7 @@ Before you can perform the upgrade, you must prepare your air gapped environment 1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). ```plain - helm fetch jetstack/cert-manager --version v0.14.2 + helm fetch jetstack/cert-manager --version v0.12.0 ``` 1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. @@ -133,7 +125,7 @@ Before you can perform the upgrade, you must prepare your air gapped environment The Helm 3 command is as follows: ```plain - helm template cert-manager ./cert-manager-v0.14.2.tgz --output-dir . \ + helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ --namespace cert-manager \ --set image.repository=/quay.io/jetstack/cert-manager-controller --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook @@ -143,7 +135,7 @@ Before you can perform the upgrade, you must prepare your air gapped environment The Helm 2 command is as follows: ```plain - helm template ./cert-manager-v0.14.2.tgz --output-dir . \ + helm template ./cert-manager-v0.12.0.tgz --output-dir . 
\ --name cert-manager --namespace cert-manager \ --set image.repository=/quay.io/jetstack/cert-manager-controller --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook @@ -153,7 +145,7 @@ Before you can perform the upgrade, you must prepare your air gapped environment 1. Download the required CRD file for cert-manager (old and new) ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.14/deploy/manifests/00-crds.yaml + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml ``` @@ -238,7 +230,5 @@ We have also removed support for the old configuration format that was deprecate Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). -Details about the change and migration instructions can be found in the [cert-manager v0.13 to v0.14 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.13-0.14/). - More info about [cert-manager upgrade information](https://cert-manager.io/docs/installation/upgrading/). diff --git a/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions/_index.md b/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions/_index.md index bb3e77d418c..4aee1733f98 100644 --- a/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions/_index.md +++ b/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions/_index.md @@ -8,7 +8,6 @@ Rancher uses cert-manager to automatically generate and renew TLS certificates f 1. 
[Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) 1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.7-0.8.html#upgrading-from-v0-7-to-v0-8). This change has no exact deadline. 1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) -1. [Cert-manager is changing the Deployment selector](https://cert-manager.io/docs/installation/upgrading/upgrading-0.13-0.14/) To address these changes, this guide will do two things: @@ -28,10 +27,6 @@ To address these changes, this guide will do two things: ## Upgrade Cert-Manager Only -If you are running cert-manager version 0.11.x or above, please follow official [upgrading doc](https://cert-manager.io/docs/installation/upgrading/#upgrading-with-helm) to upgrade it using helm. - -Continue reading if you are comming from a cert-manager version older than 0.11 - > **Note:** > These instructions are applied if you have no plan to upgrade Rancher. @@ -73,7 +68,7 @@ In order to upgrade cert-manager, follow these instructions: 1. Install the new version of cert-manager ```plain - helm install --version 0.14.2 --name cert-manager --namespace kube-system jetstack/cert-manager + helm install --version 0.12.0 --name cert-manager --namespace kube-system jetstack/cert-manager ``` {{% /accordion %}} @@ -94,13 +89,13 @@ Before you can perform the upgrade, you must prepare your air gapped environment 1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). ```plain - helm fetch jetstack/cert-manager --version v0.14.2 + helm fetch jetstack/cert-manager --version v0.12.0 ``` 1. 
Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. ```plain - helm template ./cert-manager-v0.14.2.tgz --output-dir . \ + helm template ./cert-manager-v0.12.0.tgz --output-dir . \ --name cert-manager --namespace kube-system \ --set image.repository=/quay.io/jetstack/cert-manager-controller --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook @@ -174,6 +169,4 @@ We have also removed support for the old configuration format that was deprecate Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). -Details about the change and migration instructions can be found in the [cert-manager v0.13 to v0.14 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.13-0.14/). - For information on upgrading from all other versions of cert-manager, refer to the [official documentation](https://cert-manager.io/docs/installation/upgrading/). diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/_index.md index 08505aafb3a..80281d3e061 100644 --- a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/_index.md @@ -85,12 +85,12 @@ By default, Rancher generates a CA and uses cert-manager to issue the certificat 1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). 
```plain - helm fetch jetstack/cert-manager --version v0.14.2 + helm fetch jetstack/cert-manager --version v0.12.0 ``` 1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. ```plain - helm template cert-manager ./cert-manager-v0.14.2.tgz --output-dir . \ + helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ --namespace cert-manager \ --set image.repository=/quay.io/jetstack/cert-manager-controller \ --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ @@ -99,7 +99,7 @@ By default, Rancher generates a CA and uses cert-manager to issue the certificat 1. Download the required CRD file for cert-manager ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.14/deploy/manifests/00-crds.yaml + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml ``` 1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. @@ -204,9 +204,10 @@ Install Rancher: kubectl create namespace cattle-system kubectl -n cattle-system apply -R -f ./rancher ``` - **Step Result:** If you are installing Rancher v2.3.0+, the installation is complete. +> **Note:** If you don't intend to send telemetry data, opt out of [telemetry]({{}}/rancher/v2.x/en/faq/telemetry/) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. + ### E.
For Rancher versions prior to v2.3.0, Configure System Charts If you are installing Rancher versions prior to v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0). @@ -326,6 +327,8 @@ docker run -d --restart=unless-stopped \ If you are installing Rancher v2.3.0+, the installation is complete. +> **Note:** If you don't intend to send telemetry data, opt out of [telemetry]({{}}/rancher/v2.x/en/faq/telemetry/) during the initial login. + If you are installing Rancher versions prior to v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0).
{{% /tab %}} diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md index 346e7bd8304..6cef213e1ae 100644 --- a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md +++ b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md @@ -59,7 +59,7 @@ In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS ```plain helm repo add jetstack https://charts.jetstack.io helm repo update - helm fetch jetstack/cert-manager --version v0.14.2 + helm fetch jetstack/cert-manager --version v0.12.0 helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt ``` @@ -226,7 +226,7 @@ The workstation must have Docker 18.02+ in order to support manifests, which are ```plain helm repo add jetstack https://charts.jetstack.io helm repo update - helm fetch jetstack/cert-manager --version v0.14.2 + helm fetch jetstack/cert-manager --version v0.12.0 helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt ``` diff --git a/content/rancher/v2.x/en/installation/requirements/installing-docker/_index.md b/content/rancher/v2.x/en/installation/requirements/installing-docker/_index.md index ac20da0afe8..b9f42e85179 100644 --- a/content/rancher/v2.x/en/installation/requirements/installing-docker/_index.md +++ b/content/rancher/v2.x/en/installation/requirements/installing-docker/_index.md @@ -9,10 +9,10 @@ There are a couple of options for installing Docker. One option is to refer to t Another option is to use one of Rancher's Docker installation scripts, which are available for most recent versions of Docker. 
-For example, this command could be used to install Docker 18.09 on Ubuntu: +For example, this command could be used to install Docker 19.03 on Ubuntu: ``` -curl https://releases.rancher.com/install-docker/18.09.sh | sh +curl https://releases.rancher.com/install-docker/19.03.sh | sh ``` -To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher's Docker installation scripts. \ No newline at end of file +Rancher has installation scripts for every version of upstream Docker that Kubernetes supports. To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher's Docker installation scripts. diff --git a/content/rancher/v2.x/en/security/benchmark-2.3.5/_index.md b/content/rancher/v2.x/en/security/benchmark-2.3.5/_index.md index 2d836d12fc0..a67a0c6cbad 100644 --- a/content/rancher/v2.x/en/security/benchmark-2.3.5/_index.md +++ b/content/rancher/v2.x/en/security/benchmark-2.3.5/_index.md @@ -3,7 +3,7 @@ title: CIS Benchmark Rancher Self-Assessment Guide - v2.3.5 weight: 205 --- -### CIS Kubernetes Benchmark 1.5 - Rancher 2.3.5 with Kubernetes 1.15 +### CIS Kubernetes Benchmark v1.5 - Rancher v2.3.5 with Kubernetes v1.15 [Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.5/Rancher_Benchmark_Assessment.pdf) @@ -1530,31 +1530,99 @@ RKE doesn’t require or maintain a configuration file for the kubelet service. #### 4.1.3 Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) -**Result:** Not Applicable +**Result:** PASS **Remediation:** -RKE doesn’t require or maintain a configuration file for the proxy service. All configuration is passed in as arguments at container run time. 
+Run the below command (based on the file location on your system) on each worker node. +For example, + +``` bash +chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected result**: + +``` +'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present +``` #### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) -**Result:** Not Applicable +**Result:** PASS **Remediation:** -RKE doesn’t require or maintain a configuration file for the proxy service. All configuration is passed in as arguments at container run time. +Run the below command (based on the file location on your system) on each worker node. +For example, + +``` bash +chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected result**: + +``` +'root:root' is present +``` #### 4.1.5 Ensure that the kubelet.conf file permissions are set to `644` or more restrictive (Scored) -**Result:** Not Applicable +**Result:** PASS **Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. +Run the below command (based on the file location on your system) on each worker node.
+For example, + +``` bash +chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected result**: + +``` +'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present +``` #### 4.1.6 Ensure that the kubelet.conf file ownership is set to `root:root` (Scored) -**Result:** Not Applicable +**Result:** PASS **Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. +Run the below command (based on the file location on your system) on each worker node. +For example, + +``` bash +chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected result**: + +``` +'root:root' is equal to 'root:root' +``` #### 4.1.7 Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) @@ -1975,7 +2043,7 @@ systemctl restart kubelet.service #### 5.1.5 Ensure that default service accounts are not actively used.
(Scored) -**Result:** FAIL +**Result:** PASS **Remediation:** Create explicit service accounts wherever a Kubernetes workload requires specific access @@ -2001,13 +2069,20 @@ fi accounts="$(kubectl --kubeconfig=${KUBECONFIG} get serviceaccounts -A -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true)) | "fail \(.metadata.name) \(.metadata.namespace)"')" -if [[ "${accounts}" == "" ]]; then - echo "--pass" - exit 0 +if [[ "${accounts}" != "" ]]; then + echo "fail: automountServiceAccountToken not false for accounts: ${accounts}" + exit 1 fi -echo ${accounts} -exit 1 +default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name=="default").metadata.uid' | wc -l)" + +if [[ "${default_binding}" -gt 0 ]]; then + echo "fail: default service accounts have non default bindings" + exit 1 +fi + +echo "--pass" +exit 0 ``` **Audit Execution:** diff --git a/content/rancher/v2.x/en/security/benchmark-2.4/_index.md b/content/rancher/v2.x/en/security/benchmark-2.4/_index.md index e5a16487085..672fbba7826 100644 --- a/content/rancher/v2.x/en/security/benchmark-2.4/_index.md +++ b/content/rancher/v2.x/en/security/benchmark-2.4/_index.md @@ -1530,31 +1530,99 @@ RKE doesn’t require or maintain a configuration file for the kubelet service. #### 4.1.3 Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) -**Result:** Not Applicable +**Result:** PASS **Remediation:** -RKE doesn’t require or maintain a configuration file for the proxy service. All configuration is passed in as arguments at container run time. +Run the below command (based on the file location on your system) on the each worker node. 
+For example, + +``` bash +chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected result**: + +``` +'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present +``` #### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) -**Result:** Not Applicable +**Result:** PASS **Remediation:** -RKE doesn’t require or maintain a configuration file for the proxy service. All configuration is passed in as arguments at container run time. +Run the below command (based on the file location on your system) on each worker node. +For example, + +``` bash +chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected result**: + +``` +'root:root' is present +``` #### 4.1.5 Ensure that the kubelet.conf file permissions are set to `644` or more restrictive (Scored) -**Result:** Not Applicable +**Result:** PASS **Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. +Run the below command (based on the file location on your system) on each worker node.
+For example, + +``` bash +chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected result**: + +``` +'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present +``` #### 4.1.6 Ensure that the kubelet.conf file ownership is set to `root:root` (Scored) -**Result:** Not Applicable +**Result:** PASS **Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. +Run the below command (based on the file location on your system) on each worker node. +For example, + +``` bash +chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected result**: + +``` +'root:root' is equal to 'root:root' +``` #### 4.1.7 Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) @@ -1813,7 +1881,7 @@ systemctl restart kubelet.service **Expected result**: ``` -'1800s' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present +'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present ``` #### 4.2.6 Ensure that the ```--protect-kernel-defaults``` argument is set to `true` (Scored) @@ -1975,7 +2043,7 @@ systemctl restart kubelet.service #### 5.1.5 Ensure that default service accounts are not actively used.
(Scored) -**Result:** FAIL +**Result:** PASS **Remediation:** Create explicit service accounts wherever a Kubernetes workload requires specific access @@ -2006,7 +2074,7 @@ if [[ "${accounts}" != "" ]]; then exit 1 fi -default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name!="default").metadata.uid' | wc -l)" +default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name=="default").metadata.uid' | wc -l)" if [[ "${default_binding}" -gt 0 ]]; then echo "fail: default service accounts have non default bindings" diff --git a/content/rancher/v2.x/en/security/hardening-2.3/_index.md b/content/rancher/v2.x/en/security/hardening-2.3/_index.md index f237643c192..fb495c04b2a 100644 --- a/content/rancher/v2.x/en/security/hardening-2.3/_index.md +++ b/content/rancher/v2.x/en/security/hardening-2.3/_index.md @@ -608,22 +608,20 @@ To pass the following controls for the kube-api server ensure RKE configuration services: kube-api: pod_security_policy: true + event_rate_limit: + enabled: true extra_args: anonymous-auth: "false" profiling: "false" service-account-lookup: "true" enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - encryption-provider-config: /opt/kubernetes/encryption.yaml - admission-control-config-file: "/opt/kubernetes/admission.yaml" audit-log-path: "/var/log/kube-audit/audit-log.json" audit-log-maxage: "5" audit-log-maxbackup: "5" audit-log-maxsize: "100" audit-log-format: "json" - audit-policy-file: /opt/kubernetes/audit.yaml tls-cipher-suites: 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" extra_binds: - - "/var/log/kube-audit:/var/log/kube-audit" - "/opt/kubernetes:/opt/kubernetes" ``` @@ -1269,13 +1267,13 @@ services: generate_serving_certificate: true kube-api: pod_security_policy: true + event_rate_limit: + enabled: true extra_args: anonymous-auth: "false" profiling: "false" service-account-lookup: "true" enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - encryption-provider-config: /opt/kubernetes/encryption.yaml - admission-control-config-file: "/opt/kubernetes/admission.yaml" audit-log-path: "/var/log/kube-audit/audit-log.json" audit-log-maxage: "5" audit-log-maxbackup: "5" @@ -1284,7 +1282,6 @@ services: audit-policy-file: /opt/kubernetes/audit.yaml tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" extra_binds: - - "/var/log/kube-audit:/var/log/kube-audit" - "/opt/kubernetes:/opt/kubernetes" scheduler: extra_args: @@ -1438,6 +1435,7 @@ addons: | # # Cluster Config # +default_pod_security_policy_template_id: restricted docker_root_dir: /var/lib/docker enable_cluster_alerting: false enable_cluster_monitoring: false @@ -1500,24 +1498,22 @@ rancher_kubernetes_engine_config: uid: 1001 kube_api: always_pull_images: false + event_rate_limit: + enabled: true extra_args: - admission-control-config-file: 
/opt/kubernetes/admission.yaml anonymous-auth: 'false' audit-log-format: json audit-log-maxage: '5' audit-log-maxbackup: '5' audit-log-maxsize: '100' audit-log-path: /var/log/kube-audit/audit-log.json - audit-policy-file: /opt/kubernetes/audit.yaml enable-admission-plugins: >- ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy - encryption-provider-config: /opt/kubernetes/encryption.yaml profiling: 'false' service-account-lookup: 'true' tls-cipher-suites: >- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 extra_binds: - - '/var/log/kube-audit:/var/log/kube-audit' - '/opt/kubernetes:/opt/kubernetes' pod_security_policy: true service_node_port_range: 30000-32767